// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}

static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}

static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;

	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to the nearest multiple of 4.
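	 *
	 * Worked example of the rounding below: with 6 possible CPUs,
	 * (6 + 3) & ~0x3 = 8 RX queues; with a single CPU,
	 * (1 + 3) & ~0x3 = 4.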
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

/* Get the number of the physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get the number of the physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8 Pools BPPI threshold for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

/* Routine to enable the PPv23 8-pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		poolnum = mvpp2_get_nrxqs(priv) * 2;
		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");
"per-cpu" : "shared"); 656 657 for (i = 0; i < poolnum; i++) { 658 /* Mask BM all interrupts */ 659 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); 660 /* Clear BM cause register */ 661 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); 662 } 663 664 /* Allocate and initialize BM pools */ 665 priv->bm_pools = devm_kcalloc(dev, poolnum, 666 sizeof(*priv->bm_pools), GFP_KERNEL); 667 if (!priv->bm_pools) 668 return -ENOMEM; 669 670 if (priv->hw_version == MVPP23) 671 mvpp23_bm_set_8pool_mode(priv); 672 673 err = mvpp2_bm_pools_init(dev, priv); 674 if (err < 0) 675 return err; 676 return 0; 677 } 678 679 static void mvpp2_setup_bm_pool(void) 680 { 681 /* Short pool */ 682 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; 683 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; 684 685 /* Long pool */ 686 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; 687 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; 688 689 /* Jumbo pool */ 690 mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; 691 mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; 692 } 693 694 /* Attach long pool to rxq */ 695 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 696 int lrxq, int long_pool) 697 { 698 u32 val, mask; 699 int prxq; 700 701 /* Get queue physical ID */ 702 prxq = port->rxqs[lrxq]->id; 703 704 if (port->priv->hw_version == MVPP21) 705 mask = MVPP21_RXQ_POOL_LONG_MASK; 706 else 707 mask = MVPP22_RXQ_POOL_LONG_MASK; 708 709 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 710 val &= ~mask; 711 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; 712 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 713 } 714 715 /* Attach short pool to rxq */ 716 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, 717 int lrxq, int short_pool) 718 { 719 u32 val, mask; 720 int prxq; 721 722 /* Get queue physical ID */ 723 prxq = port->rxqs[lrxq]->id; 724 725 if (port->priv->hw_version == MVPP21) 726 mask = MVPP21_RXQ_POOL_SHORT_MASK; 727 else 728 mask = MVPP22_RXQ_POOL_SHORT_MASK; 729 730 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 731 val &= ~mask; 732 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; 733 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 734 } 735 736 static void *mvpp2_buf_alloc(struct mvpp2_port *port, 737 struct mvpp2_bm_pool *bm_pool, 738 struct page_pool *page_pool, 739 dma_addr_t *buf_dma_addr, 740 phys_addr_t *buf_phys_addr, 741 gfp_t gfp_mask) 742 { 743 dma_addr_t dma_addr; 744 struct page *page; 745 void *data; 746 747 data = mvpp2_frag_alloc(bm_pool, page_pool); 748 if (!data) 749 return NULL; 750 751 if (page_pool) { 752 page = (struct page *)data; 753 dma_addr = page_pool_get_dma_addr(page); 754 data = page_to_virt(page); 755 } else { 756 dma_addr = dma_map_single(port->dev->dev.parent, data, 757 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), 758 DMA_FROM_DEVICE); 759 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { 760 mvpp2_frag_free(bm_pool, NULL, data); 761 return NULL; 762 } 763 } 764 *buf_dma_addr = dma_addr; 765 *buf_phys_addr = virt_to_phys(data); 766 767 return data; 768 } 769 770 /* Routine enable flow control for RXQs condition */ 771 static void mvpp2_rxq_enable_fc(struct mvpp2_port *port) 772 { 773 int val, cm3_state, host_id, q; 774 int fq = port->first_rxq; 775 unsigned long flags; 776 777 spin_lock_irqsave(&port->priv->mss_spinlock, flags); 778 779 /* Remove Flow control enable bit to prevent race between FW and Kernel 780 * If Flow 
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Set the same Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set stop and start Flow control RXQ thresholds */
		val = MSS_THRESHOLD_START;
		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
		/* Set RXQ port ID */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		/* Calculate RXQ host ID:
		 * In Single queue mode: Host ID equal to Host ID used for
		 *			 the shared RX interrupt
		 * In Multi queue mode: Host ID equal to
		 *			RXQ ID / number of CoS queues
		 * In Single resource mode: Host ID always equal to 0
		 */
		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
			host_id = port->nqvecs;
		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
			host_id = q;
		else
			host_id = 0;

		/* Set RXQ host ID */
		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify the firmware that the Flow control config space is ready
	 * for update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Routine to disable flow control for the RXQs */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, q;
	unsigned long flags;
	int fq = port->first_rxq;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW
	 * and kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Disable Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set threshold 0 to disable Flow control */
		val = 0;
		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));

		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));

		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify the firmware that the Flow control config space is ready
	 * for update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Routine to disable/enable flow control for a BM pool */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW
	 * and kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if the BM pool should be enabled/disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if pool empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify the firmware that the Flow control config space is ready
	 * for update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable/enable flow control for BM pools on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			/* Use a separate index for the per-rxq pools so the
			 * outer port loop counter is not clobbered.
			 */
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}

static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout = 0;

	/* Enable global flow control. At this stage global flow control is
	 * enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if the firmware is running and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	while (timeout < MSS_FC_MAX_TIMEOUT) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);

		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
		timeout++;
	}

	priv->global_tx_fc = false;
	return -EOPNOTSUPP;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and is simply
	 * returned in the "cookie" field of the RX descriptor. Instead of
	 * storing the virtual address, we store the physical address.
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools\n");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that the BM pool is being used as a specific type and
 * return the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool >= mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool >= port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG,
						   i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update the L4 checksum offload when jumbo is enabled/disabled on
	 * the port. Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in the TX
	 * descriptor has 7 bits, so the maximum L3 offset is 128.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
		}

		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);

		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
		}

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
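
/* Per-port interrupt enable/disable helpers. Judging by the
 * MVPP2_ISR_ENABLE_INTERRUPT()/MVPP2_ISR_DISABLE_INTERRUPT() encodings,
 * MVPP2_ISR_ENABLE_REG appears to expose separate enable and disable bit
 * fields, so a plain write is sufficient here and no read-modify-write of
 * the register is needed.
 */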
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * so using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * so using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 val, thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version == MVPP21)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
	}
}

/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT2_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT3_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 val;

	val = readl(fca + MVPP22_FCA_CONTROL_REG);
	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
	if (en)
		val |= MVPP22_FCA_ENABLE_PERIODIC;
	writel(val, fca + MVPP22_FCA_CONTROL_REG);
}

static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 lsb, msb;

	lsb = timer & MVPP22_FCA_REG_MASK;
	msb = timer >> MVPP22_FCA_REG_SIZE;

	writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
	writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
}

/* Set the Flow Control timer x100 faster than the pause quanta to ensure
 * that the link partner won't send traffic if the port is in XOFF mode.
 */
static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{
	u32 timer;

	timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
		* FC_QUANTA;

	mvpp22_gop_fca_enable_periodic(port, false);

	mvpp22_gop_fca_set_timer(port, timer);

	mvpp22_gop_fca_enable_periodic(port, true);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
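
	/* Arm the periodic FCA pause-frame timer. Rough worked example of
	 * the x100 rule documented above mvpp22_gop_fca_set_periodic_timer():
	 * assuming tclk = 333 MHz and FC_CLK_DIVIDER = 100 (illustrative
	 * values only), timer = 333000000 / (1000000 * 100) * FC_QUANTA
	 * = 3 * FC_QUANTA tclk cycles between periodic XOFF refreshes.
	 */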
	mvpp22_gop_fca_set_periodic_timer(port);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
		     MVPP22_GMAC_INT_SUM_MASK_PTP,
		     MVPP22_GMAC_INT_SUM_MASK_PTP);

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);

		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
			     MVPP22_XLG_EXT_INT_MASK_PTP,
			     MVPP22_XLG_EXT_INT_MASK_PTP);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem,
 * while the one given to the COMPHY comes from the generic PHY subsystem.
 * Hence they differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(state->interface) ||
	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. Depending on the register we access, the index can
 * represent various resources: a hit counter for some classification
 * tables, or a counter specific to a rxq, a txq or a buffer pool.
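 *
 * For example, matching the usage in mvpp2_read_stats() further down, a
 * per-TXQ counter read selects the (port, txq) pair through MVPP2_CTRS_IDX
 * and then reads the counter register, i.e.:
 *
 *	mvpp2_read_index(priv, MVPP22_CTRS_TX_CTR(port->id, q),
 *			 MVPP2_TX_DESC_ENQ_CTR);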
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}

/* Because software statistics and hardware statistics are, by design,
 * incremented at different moments in the chain of packet processing, it is
 * very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets). In the opposite direction, during transmission, FCS
 * bytes are added in between, and TSO skbs will be split with header bytes
 * added. Hence, statistics gathered from userspace with ifconfig (software)
 * and ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
}, 1924 { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" }, 1925 { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" }, 1926 { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, 1927 }; 1928 1929 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = { 1930 { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", }, 1931 { ETHTOOL_XDP_PASS, "rx_xdp_pass", }, 1932 { ETHTOOL_XDP_DROP, "rx_xdp_drop", }, 1933 { ETHTOOL_XDP_TX, "rx_xdp_tx", }, 1934 { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", }, 1935 { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", }, 1936 { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", }, 1937 }; 1938 1939 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ 1940 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ 1941 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \ 1942 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ 1943 ARRAY_SIZE(mvpp2_ethtool_xdp)) 1944 1945 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, 1946 u8 *data) 1947 { 1948 struct mvpp2_port *port = netdev_priv(netdev); 1949 int i, q; 1950 1951 if (sset != ETH_SS_STATS) 1952 return; 1953 1954 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) { 1955 strscpy(data, mvpp2_ethtool_mib_regs[i].string, 1956 ETH_GSTRING_LEN); 1957 data += ETH_GSTRING_LEN; 1958 } 1959 1960 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) { 1961 strscpy(data, mvpp2_ethtool_port_regs[i].string, 1962 ETH_GSTRING_LEN); 1963 data += ETH_GSTRING_LEN; 1964 } 1965 1966 for (q = 0; q < port->ntxqs; q++) { 1967 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { 1968 snprintf(data, ETH_GSTRING_LEN, 1969 mvpp2_ethtool_txq_regs[i].string, q); 1970 data += ETH_GSTRING_LEN; 1971 } 1972 } 1973 1974 for (q = 0; q < port->nrxqs; q++) { 1975 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { 1976 snprintf(data, ETH_GSTRING_LEN, 1977 mvpp2_ethtool_rxq_regs[i].string, 1978 q); 1979 data += ETH_GSTRING_LEN; 1980 } 1981 } 1982 1983 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) { 1984 strscpy(data, mvpp2_ethtool_xdp[i].string, 1985 ETH_GSTRING_LEN); 1986 data += ETH_GSTRING_LEN; 1987 } 1988 } 1989 1990 static void 1991 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) 1992 { 1993 unsigned int start; 1994 unsigned int cpu; 1995 1996 /* Gather XDP Statistics */ 1997 for_each_possible_cpu(cpu) { 1998 struct mvpp2_pcpu_stats *cpu_stats; 1999 u64 xdp_redirect; 2000 u64 xdp_pass; 2001 u64 xdp_drop; 2002 u64 xdp_xmit; 2003 u64 xdp_xmit_err; 2004 u64 xdp_tx; 2005 u64 xdp_tx_err; 2006 2007 cpu_stats = per_cpu_ptr(port->stats, cpu); 2008 do { 2009 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 2010 xdp_redirect = cpu_stats->xdp_redirect; 2011 xdp_pass = cpu_stats->xdp_pass; 2012 xdp_drop = cpu_stats->xdp_drop; 2013 xdp_xmit = cpu_stats->xdp_xmit; 2014 xdp_xmit_err = cpu_stats->xdp_xmit_err; 2015 xdp_tx = cpu_stats->xdp_tx; 2016 xdp_tx_err = cpu_stats->xdp_tx_err; 2017 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 2018 2019 xdp_stats->xdp_redirect += xdp_redirect; 2020 xdp_stats->xdp_pass += xdp_pass; 2021 xdp_stats->xdp_drop += xdp_drop; 2022 xdp_stats->xdp_xmit += xdp_xmit; 2023 xdp_stats->xdp_xmit_err += xdp_xmit_err; 2024 xdp_stats->xdp_tx += xdp_tx; 2025 xdp_stats->xdp_tx_err += xdp_tx_err; 2026 } 2027 } 2028 2029 static void mvpp2_read_stats(struct mvpp2_port *port) 2030 { 2031 struct mvpp2_pcpu_stats xdp_stats = {}; 2032 const struct mvpp2_ethtool_counter *s; 2033 u64 *pstats; 2034 int i, q; 2035 2036 pstats = 
port->ethtool_stats; 2037 2038 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) 2039 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); 2040 2041 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) 2042 *pstats++ += mvpp2_read(port->priv, 2043 mvpp2_ethtool_port_regs[i].offset + 2044 4 * port->id); 2045 2046 for (q = 0; q < port->ntxqs; q++) 2047 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) 2048 *pstats++ += mvpp2_read_index(port->priv, 2049 MVPP22_CTRS_TX_CTR(port->id, q), 2050 mvpp2_ethtool_txq_regs[i].offset); 2051 2052 /* Rxqs are numbered from 0 from the user's standpoint, but not from the 2053 * driver's: we need to add the port->first_rxq offset. 2054 */ 2055 for (q = 0; q < port->nrxqs; q++) 2056 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) 2057 *pstats++ += mvpp2_read_index(port->priv, 2058 port->first_rxq + q, 2059 mvpp2_ethtool_rxq_regs[i].offset); 2060 2061 /* Gather XDP Statistics */ 2062 mvpp2_get_xdp_stats(port, &xdp_stats); 2063 2064 for (i = 0, s = mvpp2_ethtool_xdp; 2065 s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); 2066 s++, i++) { 2067 switch (s->offset) { 2068 case ETHTOOL_XDP_REDIRECT: 2069 *pstats++ = xdp_stats.xdp_redirect; 2070 break; 2071 case ETHTOOL_XDP_PASS: 2072 *pstats++ = xdp_stats.xdp_pass; 2073 break; 2074 case ETHTOOL_XDP_DROP: 2075 *pstats++ = xdp_stats.xdp_drop; 2076 break; 2077 case ETHTOOL_XDP_TX: 2078 *pstats++ = xdp_stats.xdp_tx; 2079 break; 2080 case ETHTOOL_XDP_TX_ERR: 2081 *pstats++ = xdp_stats.xdp_tx_err; 2082 break; 2083 case ETHTOOL_XDP_XMIT: 2084 *pstats++ = xdp_stats.xdp_xmit; 2085 break; 2086 case ETHTOOL_XDP_XMIT_ERR: 2087 *pstats++ = xdp_stats.xdp_xmit_err; 2088 break; 2089 } 2090 } 2091 } 2092 2093 static void mvpp2_gather_hw_statistics(struct work_struct *work) 2094 { 2095 struct delayed_work *del_work = to_delayed_work(work); 2096 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, 2097 stats_work); 2098 2099 mutex_lock(&port->gather_stats_lock); 2100 2101 mvpp2_read_stats(port); 2102 2103 /* No need to read the counters again right after this function if it 2104 * was called asynchronously by the user (i.e. via ethtool). 2105 */ 2106 cancel_delayed_work(&port->stats_work); 2107 queue_delayed_work(port->priv->stats_queue, &port->stats_work, 2108 MVPP2_MIB_COUNTERS_STATS_DELAY); 2109 2110 mutex_unlock(&port->gather_stats_lock); 2111 } 2112 2113 static void mvpp2_ethtool_get_stats(struct net_device *dev, 2114 struct ethtool_stats *stats, u64 *data) 2115 { 2116 struct mvpp2_port *port = netdev_priv(dev); 2117 2118 /* Update statistics for the given port, then take the lock to avoid 2119 * concurrent accesses on the ethtool_stats structure during its copy.
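 * Gathering synchronously here also re-arms the periodic stats_work, so
 * the MIB counters, which clear on read, keep being drained regularly
 * instead of only when userspace asks for them.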
2120 */ 2121 mvpp2_gather_hw_statistics(&port->stats_work.work); 2122 2123 mutex_lock(&port->gather_stats_lock); 2124 memcpy(data, port->ethtool_stats, 2125 sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); 2126 mutex_unlock(&port->gather_stats_lock); 2127 } 2128 2129 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) 2130 { 2131 struct mvpp2_port *port = netdev_priv(dev); 2132 2133 if (sset == ETH_SS_STATS) 2134 return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); 2135 2136 return -EOPNOTSUPP; 2137 } 2138 2139 static void mvpp2_mac_reset_assert(struct mvpp2_port *port) 2140 { 2141 u32 val; 2142 2143 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | 2144 MVPP2_GMAC_PORT_RESET_MASK; 2145 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 2146 2147 if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) { 2148 val = readl(port->base + MVPP22_XLG_CTRL0_REG) & 2149 ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; 2150 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 2151 } 2152 } 2153 2154 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) 2155 { 2156 struct mvpp2 *priv = port->priv; 2157 void __iomem *mpcs, *xpcs; 2158 u32 val; 2159 2160 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) 2161 return; 2162 2163 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); 2164 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); 2165 2166 val = readl(mpcs + MVPP22_MPCS_CLK_RESET); 2167 val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); 2168 val |= MVPP22_MPCS_CLK_RESET_DIV_SET; 2169 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 2170 2171 val = readl(xpcs + MVPP22_XPCS_CFG0); 2172 writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); 2173 } 2174 2175 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port) 2176 { 2177 struct mvpp2 *priv = port->priv; 2178 void __iomem *mpcs, *xpcs; 2179 u32 val; 2180 2181 if (port->priv->hw_version == MVPP21 || port->gop_id != 0) 2182 return; 2183 2184 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); 2185 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); 2186 2187 switch (port->phy_interface) { 2188 case PHY_INTERFACE_MODE_10GBASER: 2189 val = readl(mpcs + MVPP22_MPCS_CLK_RESET); 2190 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | 2191 MAC_CLK_RESET_SD_TX; 2192 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; 2193 writel(val, mpcs + MVPP22_MPCS_CLK_RESET); 2194 break; 2195 case PHY_INTERFACE_MODE_XAUI: 2196 case PHY_INTERFACE_MODE_RXAUI: 2197 val = readl(xpcs + MVPP22_XPCS_CFG0); 2198 writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); 2199 break; 2200 default: 2201 break; 2202 } 2203 } 2204 2205 /* Change maximum receive size of the port */ 2206 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) 2207 { 2208 u32 val; 2209 2210 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 2211 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 2212 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << 2213 MVPP2_GMAC_MAX_RX_SIZE_OFFS); 2214 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 2215 } 2216 2217 /* Change maximum receive size of the port */ 2218 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) 2219 { 2220 u32 val; 2221 2222 val = readl(port->base + MVPP22_XLG_CTRL1_REG); 2223 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; 2224 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << 2225 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; 2226 writel(val, port->base + MVPP22_XLG_CTRL1_REG); 2227 } 2228 2229 /* Set defaults to the MVPP2 port */ 2230 static void 
mvpp2_defaults_set(struct mvpp2_port *port) 2231 { 2232 int tx_port_num, val, queue, lrxq; 2233 2234 if (port->priv->hw_version == MVPP21) { 2235 /* Update TX FIFO MIN Threshold */ 2236 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 2237 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 2238 /* Min. TX threshold must be less than minimal packet length */ 2239 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 2240 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 2241 } 2242 2243 /* Disable Legacy WRR, Disable EJP, Release from reset */ 2244 tx_port_num = mvpp2_egress_port(port); 2245 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, 2246 tx_port_num); 2247 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 2248 2249 /* Set TXQ scheduling to Round-Robin */ 2250 mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); 2251 2252 /* Close bandwidth for all queues */ 2253 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) 2254 mvpp2_write(port->priv, 2255 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); 2256 2257 /* Set refill period to 1 usec, refill tokens 2258 * and bucket size to maximum 2259 */ 2260 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 2261 port->priv->tclk / USEC_PER_SEC); 2262 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); 2263 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 2264 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); 2265 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 2266 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); 2267 val = MVPP2_TXP_TOKEN_SIZE_MAX; 2268 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 2269 2270 /* Set MaximumLowLatencyPacketSize value to 256 */ 2271 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), 2272 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | 2273 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); 2274 2275 /* Enable Rx cache snoop */ 2276 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 2277 queue = port->rxqs[lrxq]->id; 2278 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2279 val |= MVPP2_SNOOP_PKT_SIZE_MASK | 2280 MVPP2_SNOOP_BUF_HDR_MASK; 2281 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2282 } 2283 2284 /* By default, mask all interrupts on all present CPUs */ 2285 mvpp2_interrupts_disable(port); 2286 } 2287 2288 /* Enable/disable receiving packets */ 2289 static void mvpp2_ingress_enable(struct mvpp2_port *port) 2290 { 2291 u32 val; 2292 int lrxq, queue; 2293 2294 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 2295 queue = port->rxqs[lrxq]->id; 2296 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2297 val &= ~MVPP2_RXQ_DISABLE_MASK; 2298 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2299 } 2300 } 2301 2302 static void mvpp2_ingress_disable(struct mvpp2_port *port) 2303 { 2304 u32 val; 2305 int lrxq, queue; 2306 2307 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { 2308 queue = port->rxqs[lrxq]->id; 2309 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2310 val |= MVPP2_RXQ_DISABLE_MASK; 2311 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2312 } 2313 } 2314 2315 /* Enable transmit via physical egress queue 2316 * - HW starts taking descriptors from DRAM 2317 */ 2318 static void mvpp2_egress_enable(struct mvpp2_port *port) 2319 { 2320 u32 qmap; 2321 int queue; 2322 int tx_port_num = mvpp2_egress_port(port); 2323 2324 /* Enable all initialized TXs.
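 * Only queues that actually have descriptor memory allocated
 * (txq->descs != NULL) are set in the enable bitmap.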
*/ 2325 qmap = 0; 2326 for (queue = 0; queue < port->ntxqs; queue++) { 2327 struct mvpp2_tx_queue *txq = port->txqs[queue]; 2328 2329 if (txq->descs) 2330 qmap |= (1 << queue); 2331 } 2332 2333 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2334 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 2335 } 2336 2337 /* Disable transmit via physical egress queue 2338 * - HW doesn't take descriptors from DRAM 2339 */ 2340 static void mvpp2_egress_disable(struct mvpp2_port *port) 2341 { 2342 u32 reg_data; 2343 int delay; 2344 int tx_port_num = mvpp2_egress_port(port); 2345 2346 /* Issue stop command for active channels only */ 2347 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2348 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & 2349 MVPP2_TXP_SCHED_ENQ_MASK; 2350 if (reg_data != 0) 2351 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, 2352 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); 2353 2354 /* Wait for all Tx activity to terminate. */ 2355 delay = 0; 2356 do { 2357 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 2358 netdev_warn(port->dev, 2359 "Tx stop timed out, status=0x%08x\n", 2360 reg_data); 2361 break; 2362 } 2363 mdelay(1); 2364 delay++; 2365 2366 /* Check the port's TX Command register to verify that all 2367 * Tx queues are stopped 2368 */ 2369 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); 2370 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); 2371 } 2372 2373 /* Rx descriptors helper methods */ 2374 2375 /* Get number of Rx descriptors occupied by received packets */ 2376 static inline int 2377 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) 2378 { 2379 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); 2380 2381 return val & MVPP2_RXQ_OCCUPIED_MASK; 2382 } 2383 2384 /* Update Rx queue status with the number of occupied and available 2385 * Rx descriptor slots. 2386 */ 2387 static inline void 2388 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, 2389 int used_count, int free_count) 2390 { 2391 /* Decrement the number of used descriptors and increment the 2392 * number of free descriptors.
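 * Both values are packed into a single register write: the used count
 * in the low bits, the free count shifted by MVPP2_RXQ_NUM_NEW_OFFSET.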
2393 */ 2394 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 2395 2396 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 2397 } 2398 2399 /* Get pointer to next RX descriptor to be processed by SW */ 2400 static inline struct mvpp2_rx_desc * 2401 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 2402 { 2403 int rx_desc = rxq->next_desc_to_proc; 2404 2405 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 2406 prefetch(rxq->descs + rxq->next_desc_to_proc); 2407 return rxq->descs + rx_desc; 2408 } 2409 2410 /* Set rx queue offset */ 2411 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 2412 int prxq, int offset) 2413 { 2414 u32 val; 2415 2416 /* Convert offset from bytes to units of 32 bytes */ 2417 offset = offset >> 5; 2418 2419 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 2420 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 2421 2422 /* Offset is in units of 32 bytes */ 2423 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 2424 MVPP2_RXQ_PACKET_OFFSET_MASK); 2425 2426 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 2427 } 2428 2429 /* Tx descriptors helper methods */ 2430 2431 /* Get pointer to next Tx descriptor to be processed (send) by HW */ 2432 static struct mvpp2_tx_desc * 2433 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 2434 { 2435 int tx_desc = txq->next_desc_to_proc; 2436 2437 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 2438 return txq->descs + tx_desc; 2439 } 2440 2441 /* Update HW with number of aggregated Tx descriptors to be sent 2442 * 2443 * Called only from mvpp2_tx(), so migration is disabled, using 2444 * smp_processor_id() is OK. 2445 */ 2446 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 2447 { 2448 /* aggregated access - relevant TXQ number is written in TX desc */ 2449 mvpp2_thread_write(port->priv, 2450 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2451 MVPP2_AGGR_TXQ_UPDATE_REG, pending); 2452 } 2453 2454 /* Check if there are enough free descriptors in aggregated txq. 2455 * If not, update the number of occupied descriptors and repeat the check. 2456 * 2457 * Called only from mvpp2_tx(), so migration is disabled, using 2458 * smp_processor_id() is OK. 2459 */ 2460 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, 2461 struct mvpp2_tx_queue *aggr_txq, int num) 2462 { 2463 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { 2464 /* Update number of occupied aggregated Tx descriptors */ 2465 unsigned int thread = 2466 mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 2467 u32 val = mvpp2_read_relaxed(port->priv, 2468 MVPP2_AGGR_TXQ_STATUS_REG(thread)); 2469 2470 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; 2471 2472 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) 2473 return -ENOMEM; 2474 } 2475 return 0; 2476 } 2477 2478 /* Reserved Tx descriptors allocation request 2479 * 2480 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called 2481 * only by mvpp2_tx(), so migration is disabled, using 2482 * smp_processor_id() is OK.
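 *
 * Writing the request register triggers the allocation in HW; the
 * result register then reports how many descriptors were actually
 * reserved, which may be fewer than requested.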
2483 */ 2484 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, 2485 struct mvpp2_tx_queue *txq, int num) 2486 { 2487 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 2488 struct mvpp2 *priv = port->priv; 2489 u32 val; 2490 2491 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; 2492 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); 2493 2494 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); 2495 2496 return val & MVPP2_TXQ_RSVD_RSLT_MASK; 2497 } 2498 2499 /* Check if there are enough reserved descriptors for transmission. 2500 * If not, request chunk of reserved descriptors and check again. 2501 */ 2502 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, 2503 struct mvpp2_tx_queue *txq, 2504 struct mvpp2_txq_pcpu *txq_pcpu, 2505 int num) 2506 { 2507 int req, desc_count; 2508 unsigned int thread; 2509 2510 if (txq_pcpu->reserved_num >= num) 2511 return 0; 2512 2513 /* Not enough descriptors reserved! Update the reserved descriptor 2514 * count and check again. 2515 */ 2516 2517 desc_count = 0; 2518 /* Compute total of used descriptors */ 2519 for (thread = 0; thread < port->priv->nthreads; thread++) { 2520 struct mvpp2_txq_pcpu *txq_pcpu_aux; 2521 2522 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); 2523 desc_count += txq_pcpu_aux->count; 2524 desc_count += txq_pcpu_aux->reserved_num; 2525 } 2526 2527 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); 2528 desc_count += req; 2529 2530 if (desc_count > 2531 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) 2532 return -ENOMEM; 2533 2534 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); 2535 2536 /* OK, the descriptor could have been updated: check again. */ 2537 if (txq_pcpu->reserved_num < num) 2538 return -ENOMEM; 2539 return 0; 2540 } 2541 2542 /* Release the last allocated Tx descriptor. Useful to handle DMA 2543 * mapping failures in the Tx path. 2544 */ 2545 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) 2546 { 2547 if (txq->next_desc_to_proc == 0) 2548 txq->next_desc_to_proc = txq->last_desc - 1; 2549 else 2550 txq->next_desc_to_proc--; 2551 } 2552 2553 /* Set Tx descriptors fields relevant for CSUM calculation */ 2554 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, 2555 int ip_hdr_len, int l4_proto) 2556 { 2557 u32 command; 2558 2559 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, 2560 * G_L4_chk, L4_type required only for checksum calculation 2561 */ 2562 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); 2563 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); 2564 command |= MVPP2_TXD_IP_CSUM_DISABLE; 2565 2566 if (l3_proto == htons(ETH_P_IP)) { 2567 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ 2568 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ 2569 } else { 2570 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ 2571 } 2572 2573 if (l4_proto == IPPROTO_TCP) { 2574 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ 2575 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2576 } else if (l4_proto == IPPROTO_UDP) { 2577 command |= MVPP2_TXD_L4_UDP; /* enable UDP */ 2578 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 2579 } else { 2580 command |= MVPP2_TXD_L4_CSUM_NOT; 2581 } 2582 2583 return command; 2584 } 2585 2586 /* Get number of sent descriptors and decrement counter. 2587 * The number of sent descriptors is returned. 
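 * The relaxed accessor is used on purpose: only a counter is sampled
 * here, in the TX completion hot path, so the barrier implied by a
 * plain readl() can presumably be avoided.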
2588 * Per-thread access 2589 * 2590 * Called only from mvpp2_txq_done(), called from mvpp2_tx() 2591 * (migration disabled) and from the TX completion tasklet (migration 2592 * disabled) so using smp_processor_id() is OK. 2593 */ 2594 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 2595 struct mvpp2_tx_queue *txq) 2596 { 2597 u32 val; 2598 2599 /* Reading status reg resets transmitted descriptor counter */ 2600 val = mvpp2_thread_read_relaxed(port->priv, 2601 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2602 MVPP2_TXQ_SENT_REG(txq->id)); 2603 2604 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 2605 MVPP2_TRANSMITTED_COUNT_OFFSET; 2606 } 2607 2608 /* Called through on_each_cpu(), so runs on all CPUs, with migration 2609 * disabled, therefore using smp_processor_id() is OK. 2610 */ 2611 static void mvpp2_txq_sent_counter_clear(void *arg) 2612 { 2613 struct mvpp2_port *port = arg; 2614 int queue; 2615 2616 /* If the thread isn't used, don't do anything */ 2617 if (smp_processor_id() >= port->priv->nthreads) 2618 return; 2619 2620 for (queue = 0; queue < port->ntxqs; queue++) { 2621 int id = port->txqs[queue]->id; 2622 2623 mvpp2_thread_read(port->priv, 2624 mvpp2_cpu_to_thread(port->priv, smp_processor_id()), 2625 MVPP2_TXQ_SENT_REG(id)); 2626 } 2627 } 2628 2629 /* Set max sizes for Tx queues */ 2630 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 2631 { 2632 u32 val, size, mtu; 2633 int txq, tx_port_num; 2634 2635 mtu = port->pkt_size * 8; 2636 if (mtu > MVPP2_TXP_MTU_MAX) 2637 mtu = MVPP2_TXP_MTU_MAX; 2638 2639 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 2640 mtu = 3 * mtu; 2641 2642 /* Indirect access to registers */ 2643 tx_port_num = mvpp2_egress_port(port); 2644 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2645 2646 /* Set MTU */ 2647 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 2648 val &= ~MVPP2_TXP_MTU_MAX; 2649 val |= mtu; 2650 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 2651 2652 /* TXP token size and all TXQs token size must be larger than the MTU */ 2653 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 2654 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 2655 if (size < mtu) { 2656 size = mtu; 2657 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 2658 val |= size; 2659 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 2660 } 2661 2662 for (txq = 0; txq < port->ntxqs; txq++) { 2663 val = mvpp2_read(port->priv, 2664 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 2665 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 2666 2667 if (size < mtu) { 2668 size = mtu; 2669 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 2670 val |= size; 2671 mvpp2_write(port->priv, 2672 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 2673 val); 2674 } 2675 } 2676 } 2677 2678 /* Set the number of non-occupied descriptors threshold */ 2679 static void mvpp2_set_rxq_free_thresh(struct mvpp2_port *port, 2680 struct mvpp2_rx_queue *rxq) 2681 { 2682 u32 val; 2683 2684 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 2685 2686 val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG); 2687 val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK; 2688 val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET; 2689 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); 2690 } 2691 2692 /* Set the number of packets that will be received before Rx interrupt 2693 * will be generated by HW.
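 * The threshold is programmed through the per-thread window: first
 * MVPP2_RXQ_NUM_REG selects the queue, then MVPP2_RXQ_THRESH_REG is
 * written with the (clamped) packet count.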
2694 */ 2695 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 2696 struct mvpp2_rx_queue *rxq) 2697 { 2698 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2699 2700 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 2701 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 2702 2703 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2704 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, 2705 rxq->pkts_coal); 2706 2707 put_cpu(); 2708 } 2709 2710 /* For some reason in the LSP this is done on each CPU. Why ? */ 2711 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, 2712 struct mvpp2_tx_queue *txq) 2713 { 2714 unsigned int thread; 2715 u32 val; 2716 2717 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) 2718 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; 2719 2720 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); 2721 /* PKT-coalescing registers are per-queue + per-thread */ 2722 for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { 2723 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 2724 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); 2725 } 2726 } 2727 2728 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 2729 { 2730 u64 tmp = (u64)clk_hz * usec; 2731 2732 do_div(tmp, USEC_PER_SEC); 2733 2734 return tmp > U32_MAX ? U32_MAX : tmp; 2735 } 2736 2737 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) 2738 { 2739 u64 tmp = (u64)cycles * USEC_PER_SEC; 2740 2741 do_div(tmp, clk_hz); 2742 2743 return tmp > U32_MAX ? U32_MAX : tmp; 2744 } 2745 2746 /* Set the time delay in usec before Rx interrupt */ 2747 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, 2748 struct mvpp2_rx_queue *rxq) 2749 { 2750 unsigned long freq = port->priv->tclk; 2751 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2752 2753 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { 2754 rxq->time_coal = 2755 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); 2756 2757 /* re-evaluate to get actual register value */ 2758 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); 2759 } 2760 2761 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); 2762 } 2763 2764 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) 2765 { 2766 unsigned long freq = port->priv->tclk; 2767 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2768 2769 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { 2770 port->tx_time_coal = 2771 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); 2772 2773 /* re-evaluate to get actual register value */ 2774 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); 2775 } 2776 2777 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); 2778 } 2779 2780 /* Free Tx queue skbuffs */ 2781 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 2782 struct mvpp2_tx_queue *txq, 2783 struct mvpp2_txq_pcpu *txq_pcpu, int num) 2784 { 2785 struct xdp_frame_bulk bq; 2786 int i; 2787 2788 xdp_frame_bulk_init(&bq); 2789 2790 rcu_read_lock(); /* need for xdp_return_frame_bulk */ 2791 2792 for (i = 0; i < num; i++) { 2793 struct mvpp2_txq_pcpu_buf *tx_buf = 2794 txq_pcpu->buffs + txq_pcpu->txq_get_index; 2795 2796 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && 2797 tx_buf->type != MVPP2_TYPE_XDP_TX) 2798 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 2799 tx_buf->size, DMA_TO_DEVICE); 2800 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) 2801 dev_kfree_skb_any(tx_buf->skb); 2802 else if (tx_buf->type == MVPP2_TYPE_XDP_TX || 2803 tx_buf->type == MVPP2_TYPE_XDP_NDO) 
2804 xdp_return_frame_bulk(tx_buf->xdpf, &bq); 2805 2806 mvpp2_txq_inc_get(txq_pcpu); 2807 } 2808 xdp_flush_frame_bulk(&bq); 2809 2810 rcu_read_unlock(); 2811 } 2812 2813 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 2814 u32 cause) 2815 { 2816 int queue = fls(cause) - 1; 2817 2818 return port->rxqs[queue]; 2819 } 2820 2821 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 2822 u32 cause) 2823 { 2824 int queue = fls(cause) - 1; 2825 2826 return port->txqs[queue]; 2827 } 2828 2829 /* Handle end of transmission */ 2830 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 2831 struct mvpp2_txq_pcpu *txq_pcpu) 2832 { 2833 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); 2834 int tx_done; 2835 2836 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) 2837 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); 2838 2839 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 2840 if (!tx_done) 2841 return; 2842 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); 2843 2844 txq_pcpu->count -= tx_done; 2845 2846 if (netif_tx_queue_stopped(nq)) 2847 if (txq_pcpu->count <= txq_pcpu->wake_threshold) 2848 netif_tx_wake_queue(nq); 2849 } 2850 2851 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, 2852 unsigned int thread) 2853 { 2854 struct mvpp2_tx_queue *txq; 2855 struct mvpp2_txq_pcpu *txq_pcpu; 2856 unsigned int tx_todo = 0; 2857 2858 while (cause) { 2859 txq = mvpp2_get_tx_queue(port, cause); 2860 if (!txq) 2861 break; 2862 2863 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 2864 2865 if (txq_pcpu->count) { 2866 mvpp2_txq_done(port, txq, txq_pcpu); 2867 tx_todo += txq_pcpu->count; 2868 } 2869 2870 cause &= ~(1 << txq->log_id); 2871 } 2872 return tx_todo; 2873 } 2874 2875 /* Rx/Tx queue initialization/cleanup methods */ 2876 2877 /* Allocate and initialize descriptors for aggr TXQ */ 2878 static int mvpp2_aggr_txq_init(struct platform_device *pdev, 2879 struct mvpp2_tx_queue *aggr_txq, 2880 unsigned int thread, struct mvpp2 *priv) 2881 { 2882 u32 txq_dma; 2883 2884 /* Allocate memory for TX descriptors */ 2885 aggr_txq->descs = dma_alloc_coherent(&pdev->dev, 2886 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2887 &aggr_txq->descs_dma, GFP_KERNEL); 2888 if (!aggr_txq->descs) 2889 return -ENOMEM; 2890 2891 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; 2892 2893 /* Aggr TXQ no reset WA */ 2894 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 2895 MVPP2_AGGR_TXQ_INDEX_REG(thread)); 2896 2897 /* Set Tx descriptors queue starting address indirect 2898 * access 2899 */ 2900 if (priv->hw_version == MVPP21) 2901 txq_dma = aggr_txq->descs_dma; 2902 else 2903 txq_dma = aggr_txq->descs_dma >> 2904 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 2905 2906 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); 2907 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), 2908 MVPP2_AGGR_TXQ_SIZE); 2909 2910 return 0; 2911 } 2912 2913 /* Create a specified Rx queue */ 2914 static int mvpp2_rxq_init(struct mvpp2_port *port, 2915 struct mvpp2_rx_queue *rxq) 2916 { 2917 struct mvpp2 *priv = port->priv; 2918 unsigned int thread; 2919 u32 rxq_dma; 2920 int err; 2921 2922 rxq->size = port->rx_ring_size; 2923 2924 /* Allocate memory for RX descriptors */ 2925 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, 2926 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 2927 &rxq->descs_dma, GFP_KERNEL); 2928 if (!rxq->descs) 2929 return -ENOMEM; 2930 2931 rxq->last_desc = rxq->size - 1; 
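/* The ring memory is in place; the queue registers below are programmed through the per-thread window, hence the get_cpu()/put_cpu() pair around the indirect accesses. */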
2932 2933 /* Zero occupied and non-occupied counters - direct access */ 2934 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 2935 2936 /* Set Rx descriptors queue starting address - indirect access */ 2937 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 2938 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 2939 if (port->priv->hw_version == MVPP21) 2940 rxq_dma = rxq->descs_dma; 2941 else 2942 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 2943 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 2944 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 2945 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); 2946 put_cpu(); 2947 2948 /* Set Offset */ 2949 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); 2950 2951 /* Set coalescing pkts and time */ 2952 mvpp2_rx_pkts_coal_set(port, rxq); 2953 mvpp2_rx_time_coal_set(port, rxq); 2954 2955 /* Set the number of non-occupied descriptors threshold */ 2956 mvpp2_set_rxq_free_thresh(port, rxq); 2957 2958 /* Add number of descriptors ready for receiving packets */ 2959 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 2960 2961 if (priv->percpu_pools) { 2962 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0); 2963 if (err < 0) 2964 goto err_free_dma; 2965 2966 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0); 2967 if (err < 0) 2968 goto err_unregister_rxq_short; 2969 2970 /* Every RXQ has a pool for short and another for long packets */ 2971 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, 2972 MEM_TYPE_PAGE_POOL, 2973 priv->page_pool[rxq->logic_rxq]); 2974 if (err < 0) 2975 goto err_unregister_rxq_long; 2976 2977 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, 2978 MEM_TYPE_PAGE_POOL, 2979 priv->page_pool[rxq->logic_rxq + 2980 port->nrxqs]); 2981 if (err < 0) 2982 goto err_unregister_mem_rxq_short; 2983 } 2984 2985 return 0; 2986 2987 err_unregister_mem_rxq_short: 2988 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); 2989 err_unregister_rxq_long: 2990 xdp_rxq_info_unreg(&rxq->xdp_rxq_long); 2991 err_unregister_rxq_short: 2992 xdp_rxq_info_unreg(&rxq->xdp_rxq_short); 2993 err_free_dma: 2994 dma_free_coherent(port->dev->dev.parent, 2995 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 2996 rxq->descs, rxq->descs_dma); 2997 return err; 2998 } 2999 3000 /* Push packets received by the RXQ to BM pool */ 3001 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 3002 struct mvpp2_rx_queue *rxq) 3003 { 3004 int rx_received, i; 3005 3006 rx_received = mvpp2_rxq_received(port, rxq->id); 3007 if (!rx_received) 3008 return; 3009 3010 for (i = 0; i < rx_received; i++) { 3011 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 3012 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 3013 int pool; 3014 3015 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 3016 MVPP2_RXD_BM_POOL_ID_OFFS; 3017 3018 mvpp2_bm_pool_put(port, pool, 3019 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 3020 mvpp2_rxdesc_cookie_get(port, rx_desc)); 3021 } 3022 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 3023 } 3024 3025 /* Cleanup Rx queue */ 3026 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 3027 struct mvpp2_rx_queue *rxq) 3028 { 3029 unsigned int thread; 3030 3031 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) 3032 xdp_rxq_info_unreg(&rxq->xdp_rxq_short); 3033 3034 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) 3035 xdp_rxq_info_unreg(&rxq->xdp_rxq_long); 3036 3037 mvpp2_rxq_drop_pkts(port, rxq); 3038 3039 if
(rxq->descs) 3040 dma_free_coherent(port->dev->dev.parent, 3041 rxq->size * MVPP2_DESC_ALIGNED_SIZE, 3042 rxq->descs, 3043 rxq->descs_dma); 3044 3045 rxq->descs = NULL; 3046 rxq->last_desc = 0; 3047 rxq->next_desc_to_proc = 0; 3048 rxq->descs_dma = 0; 3049 3050 /* Clear Rx descriptors queue starting address and size; 3051 * free descriptor number 3052 */ 3053 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 3054 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3055 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); 3056 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); 3057 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); 3058 put_cpu(); 3059 } 3060 3061 /* Create and initialize a Tx queue */ 3062 static int mvpp2_txq_init(struct mvpp2_port *port, 3063 struct mvpp2_tx_queue *txq) 3064 { 3065 u32 val; 3066 unsigned int thread; 3067 int desc, desc_per_txq, tx_port_num; 3068 struct mvpp2_txq_pcpu *txq_pcpu; 3069 3070 txq->size = port->tx_ring_size; 3071 3072 /* Allocate memory for Tx descriptors */ 3073 txq->descs = dma_alloc_coherent(port->dev->dev.parent, 3074 txq->size * MVPP2_DESC_ALIGNED_SIZE, 3075 &txq->descs_dma, GFP_KERNEL); 3076 if (!txq->descs) 3077 return -ENOMEM; 3078 3079 txq->last_desc = txq->size - 1; 3080 3081 /* Set Tx descriptors queue starting address - indirect access */ 3082 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3083 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3084 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 3085 txq->descs_dma); 3086 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 3087 txq->size & MVPP2_TXQ_DESC_SIZE_MASK); 3088 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); 3089 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, 3090 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 3091 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); 3092 val &= ~MVPP2_TXQ_PENDING_MASK; 3093 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); 3094 3095 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 3096 * for each existing TXQ. 
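 * The resulting slot is (port->id * MVPP2_MAX_TXQ + txq->log_id) * 16,
 * i.e. a distinct 16-descriptor window for every TXQ of every port.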
* TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT; 3098 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS. 3099 */ 3100 desc_per_txq = 16; 3101 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 3102 (txq->log_id * desc_per_txq); 3103 3104 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, 3105 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 3106 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 3107 put_cpu(); 3108 3109 /* WRR / EJP configuration - indirect access */ 3110 tx_port_num = mvpp2_egress_port(port); 3111 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3112 3113 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 3114 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 3115 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 3116 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 3117 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 3118 3119 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 3120 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 3121 val); 3122 3123 for (thread = 0; thread < port->priv->nthreads; thread++) { 3124 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3125 txq_pcpu->size = txq->size; 3126 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, 3127 sizeof(*txq_pcpu->buffs), 3128 GFP_KERNEL); 3129 if (!txq_pcpu->buffs) 3130 return -ENOMEM; 3131 3132 txq_pcpu->count = 0; 3133 txq_pcpu->reserved_num = 0; 3134 txq_pcpu->txq_put_index = 0; 3135 txq_pcpu->txq_get_index = 0; 3136 txq_pcpu->tso_headers = NULL; 3137 3138 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; 3139 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; 3140 3141 txq_pcpu->tso_headers = 3142 dma_alloc_coherent(port->dev->dev.parent, 3143 txq_pcpu->size * TSO_HEADER_SIZE, 3144 &txq_pcpu->tso_headers_dma, 3145 GFP_KERNEL); 3146 if (!txq_pcpu->tso_headers) 3147 return -ENOMEM; 3148 } 3149 3150 return 0; 3151 } 3152 3153 /* Free allocated TXQ resources */ 3154 static void mvpp2_txq_deinit(struct mvpp2_port *port, 3155 struct mvpp2_tx_queue *txq) 3156 { 3157 struct mvpp2_txq_pcpu *txq_pcpu; 3158 unsigned int thread; 3159 3160 for (thread = 0; thread < port->priv->nthreads; thread++) { 3161 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3162 kfree(txq_pcpu->buffs); 3163 3164 if (txq_pcpu->tso_headers) 3165 dma_free_coherent(port->dev->dev.parent, 3166 txq_pcpu->size * TSO_HEADER_SIZE, 3167 txq_pcpu->tso_headers, 3168 txq_pcpu->tso_headers_dma); 3169 3170 txq_pcpu->tso_headers = NULL; 3171 } 3172 3173 if (txq->descs) 3174 dma_free_coherent(port->dev->dev.parent, 3175 txq->size * MVPP2_DESC_ALIGNED_SIZE, 3176 txq->descs, txq->descs_dma); 3177 3178 txq->descs = NULL; 3179 txq->last_desc = 0; 3180 txq->next_desc_to_proc = 0; 3181 txq->descs_dma = 0; 3182 3183 /* Set minimum bandwidth for disabled TXQs */ 3184 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); 3185 3186 /* Set Tx descriptors queue starting address and size */ 3187 thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3188 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3189 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); 3190 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); 3191 put_cpu(); 3192 } 3193 3194 /* Drain a Tx queue and release the buffers it still holds */ 3195 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 3196 { 3197 struct mvpp2_txq_pcpu *txq_pcpu; 3198 int delay, pending; 3199 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); 3200 u32 val; 3201 3202
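/* Select the queue and set the drain-enable bit in the prefetch buffer register, then poll the pending counter until it reaches zero. */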
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); 3203 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); 3204 val |= MVPP2_TXQ_DRAIN_EN_MASK; 3205 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 3206 3207 /* The napi queue has been stopped so wait for all packets 3208 * to be transmitted. 3209 */ 3210 delay = 0; 3211 do { 3212 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 3213 netdev_warn(port->dev, 3214 "port %d: cleaning queue %d timed out\n", 3215 port->id, txq->log_id); 3216 break; 3217 } 3218 mdelay(1); 3219 delay++; 3220 3221 pending = mvpp2_thread_read(port->priv, thread, 3222 MVPP2_TXQ_PENDING_REG); 3223 pending &= MVPP2_TXQ_PENDING_MASK; 3224 } while (pending); 3225 3226 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 3227 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); 3228 put_cpu(); 3229 3230 for (thread = 0; thread < port->priv->nthreads; thread++) { 3231 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3232 3233 /* Release all packets */ 3234 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 3235 3236 /* Reset queue */ 3237 txq_pcpu->count = 0; 3238 txq_pcpu->txq_put_index = 0; 3239 txq_pcpu->txq_get_index = 0; 3240 } 3241 } 3242 3243 /* Cleanup all Tx queues */ 3244 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 3245 { 3246 struct mvpp2_tx_queue *txq; 3247 int queue; 3248 u32 val; 3249 3250 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 3251 3252 /* Reset Tx ports and delete Tx queues */ 3253 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 3254 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3255 3256 for (queue = 0; queue < port->ntxqs; queue++) { 3257 txq = port->txqs[queue]; 3258 mvpp2_txq_clean(port, txq); 3259 mvpp2_txq_deinit(port, txq); 3260 } 3261 3262 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 3263 3264 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 3265 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3266 } 3267 3268 /* Cleanup all Rx queues */ 3269 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 3270 { 3271 int queue; 3272 3273 for (queue = 0; queue < port->nrxqs; queue++) 3274 mvpp2_rxq_deinit(port, port->rxqs[queue]); 3275 3276 if (port->tx_fc) 3277 mvpp2_rxq_disable_fc(port); 3278 } 3279 3280 /* Init all Rx queues for port */ 3281 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 3282 { 3283 int queue, err; 3284 3285 for (queue = 0; queue < port->nrxqs; queue++) { 3286 err = mvpp2_rxq_init(port, port->rxqs[queue]); 3287 if (err) 3288 goto err_cleanup; 3289 } 3290 3291 if (port->tx_fc) 3292 mvpp2_rxq_enable_fc(port); 3293 3294 return 0; 3295 3296 err_cleanup: 3297 mvpp2_cleanup_rxqs(port); 3298 return err; 3299 } 3300 3301 /* Init all tx queues for port */ 3302 static int mvpp2_setup_txqs(struct mvpp2_port *port) 3303 { 3304 struct mvpp2_tx_queue *txq; 3305 int queue, err; 3306 3307 for (queue = 0; queue < port->ntxqs; queue++) { 3308 txq = port->txqs[queue]; 3309 err = mvpp2_txq_init(port, txq); 3310 if (err) 3311 goto err_cleanup; 3312 3313 /* Assign this queue to a CPU */ 3314 if (queue < num_possible_cpus()) 3315 netif_set_xps_queue(port->dev, cpumask_of(queue), queue); 3316 } 3317 3318 if (port->has_tx_irqs) { 3319 mvpp2_tx_time_coal_set(port); 3320 for (queue = 0; queue < port->ntxqs; queue++) { 3321 txq = port->txqs[queue]; 3322 mvpp2_tx_pkts_coal_set(port, txq); 3323 } 3324 } 3325 3326 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 3327 return 0; 3328 3329 err_cleanup: 3330 mvpp2_cleanup_txqs(port); 3331 return err; 3332 } 3333 3334 /* The 
callback for per-port interrupt */ 3335 static irqreturn_t mvpp2_isr(int irq, void *dev_id) 3336 { 3337 struct mvpp2_queue_vector *qv = dev_id; 3338 3339 mvpp2_qvec_interrupt_disable(qv); 3340 3341 napi_schedule(&qv->napi); 3342 3343 return IRQ_HANDLED; 3344 } 3345 3346 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) 3347 { 3348 struct skb_shared_hwtstamps shhwtstamps; 3349 struct mvpp2_hwtstamp_queue *queue; 3350 struct sk_buff *skb; 3351 void __iomem *ptp_q; 3352 unsigned int id; 3353 u32 r0, r1, r2; 3354 3355 ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); 3356 if (nq) 3357 ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0; 3358 3359 queue = &port->tx_hwtstamp_queue[nq]; 3360 3361 while (1) { 3362 r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff; 3363 if (!r0) 3364 break; 3365 3366 r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff; 3367 r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff; 3368 3369 id = (r0 >> 1) & 31; 3370 3371 skb = queue->skb[id]; 3372 queue->skb[id] = NULL; 3373 if (skb) { 3374 u32 ts = r2 << 19 | r1 << 3 | r0 >> 13; 3375 3376 mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps); 3377 skb_tstamp_tx(skb, &shhwtstamps); 3378 dev_kfree_skb_any(skb); 3379 } 3380 } 3381 } 3382 3383 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) 3384 { 3385 void __iomem *ptp; 3386 u32 val; 3387 3388 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); 3389 val = readl(ptp + MVPP22_PTP_INT_CAUSE); 3390 if (val & MVPP22_PTP_INT_CAUSE_QUEUE0) 3391 mvpp2_isr_handle_ptp_queue(port, 0); 3392 if (val & MVPP22_PTP_INT_CAUSE_QUEUE1) 3393 mvpp2_isr_handle_ptp_queue(port, 1); 3394 } 3395 3396 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link) 3397 { 3398 struct net_device *dev = port->dev; 3399 3400 if (port->phylink) { 3401 phylink_mac_change(port->phylink, link); 3402 return; 3403 } 3404 3405 if (!netif_running(dev)) 3406 return; 3407 3408 if (link) { 3409 mvpp2_interrupts_enable(port); 3410 3411 mvpp2_egress_enable(port); 3412 mvpp2_ingress_enable(port); 3413 netif_carrier_on(dev); 3414 netif_tx_wake_all_queues(dev); 3415 } else { 3416 netif_tx_stop_all_queues(dev); 3417 netif_carrier_off(dev); 3418 mvpp2_ingress_disable(port); 3419 mvpp2_egress_disable(port); 3420 3421 mvpp2_interrupts_disable(port); 3422 } 3423 } 3424 3425 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) 3426 { 3427 bool link; 3428 u32 val; 3429 3430 val = readl(port->base + MVPP22_XLG_INT_STAT); 3431 if (val & MVPP22_XLG_INT_STAT_LINK) { 3432 val = readl(port->base + MVPP22_XLG_STATUS); 3433 link = (val & MVPP22_XLG_STATUS_LINK_UP); 3434 mvpp2_isr_handle_link(port, link); 3435 } 3436 } 3437 3438 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) 3439 { 3440 bool link; 3441 u32 val; 3442 3443 if (phy_interface_mode_is_rgmii(port->phy_interface) || 3444 phy_interface_mode_is_8023z(port->phy_interface) || 3445 port->phy_interface == PHY_INTERFACE_MODE_SGMII) { 3446 val = readl(port->base + MVPP22_GMAC_INT_STAT); 3447 if (val & MVPP22_GMAC_INT_STAT_LINK) { 3448 val = readl(port->base + MVPP2_GMAC_STATUS0); 3449 link = (val & MVPP2_GMAC_STATUS0_LINK_UP); 3450 mvpp2_isr_handle_link(port, link); 3451 } 3452 } 3453 } 3454 3455 /* Per-port interrupt for link status changes */ 3456 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id) 3457 { 3458 struct mvpp2_port *port = (struct mvpp2_port *)dev_id; 3459 u32 val; 3460 3461 mvpp22_gop_mask_irq(port); 3462 3463 if (mvpp2_port_supports_xlg(port) && 3464 
mvpp2_is_xlg(port->phy_interface)) { 3465 /* Check the external status register */ 3466 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); 3467 if (val & MVPP22_XLG_EXT_INT_STAT_XLG) 3468 mvpp2_isr_handle_xlg(port); 3469 if (val & MVPP22_XLG_EXT_INT_STAT_PTP) 3470 mvpp2_isr_handle_ptp(port); 3471 } else { 3472 /* If it's not the XLG, we must be using the GMAC. 3473 * Check the summary status. 3474 */ 3475 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); 3476 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) 3477 mvpp2_isr_handle_gmac_internal(port); 3478 if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) 3479 mvpp2_isr_handle_ptp(port); 3480 } 3481 3482 mvpp22_gop_unmask_irq(port); 3483 return IRQ_HANDLED; 3484 } 3485 3486 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) 3487 { 3488 struct net_device *dev; 3489 struct mvpp2_port *port; 3490 struct mvpp2_port_pcpu *port_pcpu; 3491 unsigned int tx_todo, cause; 3492 3493 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); 3494 dev = port_pcpu->dev; 3495 3496 if (!netif_running(dev)) 3497 return HRTIMER_NORESTART; 3498 3499 port_pcpu->timer_scheduled = false; 3500 port = netdev_priv(dev); 3501 3502 /* Process all the Tx queues */ 3503 cause = (1 << port->ntxqs) - 1; 3504 tx_todo = mvpp2_tx_done(port, cause, 3505 mvpp2_cpu_to_thread(port->priv, smp_processor_id())); 3506 3507 /* Set the timer in case not all the packets were processed */ 3508 if (tx_todo && !port_pcpu->timer_scheduled) { 3509 port_pcpu->timer_scheduled = true; 3510 hrtimer_forward_now(&port_pcpu->tx_done_timer, 3511 MVPP2_TXDONE_HRTIMER_PERIOD_NS); 3512 3513 return HRTIMER_RESTART; 3514 } 3515 return HRTIMER_NORESTART; 3516 } 3517 3518 /* Main RX/TX processing routines */ 3519 3520 /* Display more error info */ 3521 static void mvpp2_rx_error(struct mvpp2_port *port, 3522 struct mvpp2_rx_desc *rx_desc) 3523 { 3524 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 3525 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 3526 char *err_str = NULL; 3527 3528 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 3529 case MVPP2_RXD_ERR_CRC: 3530 err_str = "crc"; 3531 break; 3532 case MVPP2_RXD_ERR_OVERRUN: 3533 err_str = "overrun"; 3534 break; 3535 case MVPP2_RXD_ERR_RESOURCE: 3536 err_str = "resource"; 3537 break; 3538 } 3539 if (err_str && net_ratelimit()) 3540 netdev_err(port->dev, 3541 "bad rx status %08x (%s error), size=%zu\n", 3542 status, err_str, sz); 3543 } 3544 3545 /* Handle RX checksum offload */ 3546 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, 3547 struct sk_buff *skb) 3548 { 3549 if (((status & MVPP2_RXD_L3_IP4) && 3550 !(status & MVPP2_RXD_IP4_HEADER_ERR)) || 3551 (status & MVPP2_RXD_L3_IP6)) 3552 if (((status & MVPP2_RXD_L4_UDP) || 3553 (status & MVPP2_RXD_L4_TCP)) && 3554 (status & MVPP2_RXD_L4_CSUM_OK)) { 3555 skb->csum = 0; 3556 skb->ip_summed = CHECKSUM_UNNECESSARY; 3557 return; 3558 } 3559 3560 skb->ip_summed = CHECKSUM_NONE; 3561 } 3562 3563 /* Allocate a new skb and add it to BM pool */ 3564 static int mvpp2_rx_refill(struct mvpp2_port *port, 3565 struct mvpp2_bm_pool *bm_pool, 3566 struct page_pool *page_pool, int pool) 3567 { 3568 dma_addr_t dma_addr; 3569 phys_addr_t phys_addr; 3570 void *buf; 3571 3572 buf = mvpp2_buf_alloc(port, bm_pool, page_pool, 3573 &dma_addr, &phys_addr, GFP_ATOMIC); 3574 if (!buf) 3575 return -ENOMEM; 3576 3577 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 3578 3579 return 0; 3580 } 3581 3582 /* Handle tx checksum */ 3583 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct 
sk_buff *skb) 3584 { 3585 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3586 int ip_hdr_len = 0; 3587 u8 l4_proto; 3588 __be16 l3_proto = vlan_get_protocol(skb); 3589 3590 if (l3_proto == htons(ETH_P_IP)) { 3591 struct iphdr *ip4h = ip_hdr(skb); 3592 3593 /* Calculate IPv4 checksum and L4 checksum */ 3594 ip_hdr_len = ip4h->ihl; 3595 l4_proto = ip4h->protocol; 3596 } else if (l3_proto == htons(ETH_P_IPV6)) { 3597 struct ipv6hdr *ip6h = ipv6_hdr(skb); 3598 3599 /* Read l4_protocol from one of IPv6 extra headers */ 3600 if (skb_network_header_len(skb) > 0) 3601 ip_hdr_len = (skb_network_header_len(skb) >> 2); 3602 l4_proto = ip6h->nexthdr; 3603 } else { 3604 return MVPP2_TXD_L4_CSUM_NOT; 3605 } 3606 3607 return mvpp2_txq_desc_csum(skb_network_offset(skb), 3608 l3_proto, ip_hdr_len, l4_proto); 3609 } 3610 3611 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; 3612 } 3613 3614 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) 3615 { 3616 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3617 struct mvpp2_tx_queue *aggr_txq; 3618 struct mvpp2_txq_pcpu *txq_pcpu; 3619 struct mvpp2_tx_queue *txq; 3620 struct netdev_queue *nq; 3621 3622 txq = port->txqs[txq_id]; 3623 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3624 nq = netdev_get_tx_queue(port->dev, txq_id); 3625 aggr_txq = &port->priv->aggr_txqs[thread]; 3626 3627 txq_pcpu->reserved_num -= nxmit; 3628 txq_pcpu->count += nxmit; 3629 aggr_txq->count += nxmit; 3630 3631 /* Enable transmit */ 3632 wmb(); 3633 mvpp2_aggr_txq_pend_desc_add(port, nxmit); 3634 3635 if (txq_pcpu->count >= txq_pcpu->stop_threshold) 3636 netif_tx_stop_queue(nq); 3637 3638 /* Finalize TX processing */ 3639 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) 3640 mvpp2_txq_done(port, txq, txq_pcpu); 3641 } 3642 3643 static int 3644 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, 3645 struct xdp_frame *xdpf, bool dma_map) 3646 { 3647 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); 3648 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | 3649 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 3650 enum mvpp2_tx_buf_type buf_type; 3651 struct mvpp2_txq_pcpu *txq_pcpu; 3652 struct mvpp2_tx_queue *aggr_txq; 3653 struct mvpp2_tx_desc *tx_desc; 3654 struct mvpp2_tx_queue *txq; 3655 int ret = MVPP2_XDP_TX; 3656 dma_addr_t dma_addr; 3657 3658 txq = port->txqs[txq_id]; 3659 txq_pcpu = per_cpu_ptr(txq->pcpu, thread); 3660 aggr_txq = &port->priv->aggr_txqs[thread]; 3661 3662 /* Check number of available descriptors */ 3663 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || 3664 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { 3665 ret = MVPP2_XDP_DROPPED; 3666 goto out; 3667 } 3668 3669 /* Get a descriptor for the first part of the packet */ 3670 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 3671 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 3672 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); 3673 3674 if (dma_map) { 3675 /* XDP_REDIRECT or AF_XDP */ 3676 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, 3677 xdpf->len, DMA_TO_DEVICE); 3678 3679 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { 3680 mvpp2_txq_desc_put(txq); 3681 ret = MVPP2_XDP_DROPPED; 3682 goto out; 3683 } 3684 3685 buf_type = MVPP2_TYPE_XDP_NDO; 3686 } else { 3687 /* XDP_TX */ 3688 struct page *page = virt_to_page(xdpf->data); 3689 3690 dma_addr = page_pool_get_dma_addr(page) + 3691 sizeof(*xdpf) + xdpf->headroom; 3692 
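/* The buffer comes from our page_pool and is therefore already DMA mapped: only a sync for device is needed. The offset skips the xdp_frame struct and its headroom so the address points at the packet data. */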
dma_sync_single_for_device(port->dev->dev.parent, dma_addr, 3693 xdpf->len, DMA_BIDIRECTIONAL); 3694 3695 buf_type = MVPP2_TYPE_XDP_TX; 3696 } 3697 3698 mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); 3699 3700 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); 3701 mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type); 3702 3703 out: 3704 return ret; 3705 } 3706 3707 static int 3708 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) 3709 { 3710 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); 3711 struct xdp_frame *xdpf; 3712 u16 txq_id; 3713 int ret; 3714 3715 xdpf = xdp_convert_buff_to_frame(xdp); 3716 if (unlikely(!xdpf)) 3717 return MVPP2_XDP_DROPPED; 3718 3719 /* The first half of the TX queues is used for XPS, 3720 * the second half for XDP_TX. 3721 */ 3722 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); 3723 3724 ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); 3725 if (ret == MVPP2_XDP_TX) { 3726 u64_stats_update_begin(&stats->syncp); 3727 stats->tx_bytes += xdpf->len; 3728 stats->tx_packets++; 3729 stats->xdp_tx++; 3730 u64_stats_update_end(&stats->syncp); 3731 3732 mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); 3733 } else { 3734 u64_stats_update_begin(&stats->syncp); 3735 stats->xdp_tx_err++; 3736 u64_stats_update_end(&stats->syncp); 3737 } 3738 3739 return ret; 3740 } 3741 3742 static int 3743 mvpp2_xdp_xmit(struct net_device *dev, int num_frame, 3744 struct xdp_frame **frames, u32 flags) 3745 { 3746 struct mvpp2_port *port = netdev_priv(dev); 3747 int i, nxmit_byte = 0, nxmit = num_frame; 3748 struct mvpp2_pcpu_stats *stats; 3749 u16 txq_id; 3750 u32 ret; 3751 3752 if (unlikely(test_bit(0, &port->state))) 3753 return -ENETDOWN; 3754 3755 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 3756 return -EINVAL; 3757 3758 /* The first half of the TX queues is used for XPS, 3759 * the second half for XDP_TX. 3760 */ 3761 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); 3762 3763 for (i = 0; i < num_frame; i++) { 3764 ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true); 3765 if (ret == MVPP2_XDP_TX) { 3766 nxmit_byte += frames[i]->len; 3767 } else { 3768 xdp_return_frame_rx_napi(frames[i]); 3769 nxmit--; 3770 } 3771 } 3772 3773 if (likely(nxmit > 0)) 3774 mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); 3775 3776 stats = this_cpu_ptr(port->stats); 3777 u64_stats_update_begin(&stats->syncp); 3778 stats->tx_bytes += nxmit_byte; 3779 stats->tx_packets += nxmit; 3780 stats->xdp_xmit += nxmit; 3781 stats->xdp_xmit_err += num_frame - nxmit; 3782 u64_stats_update_end(&stats->syncp); 3783 3784 return nxmit; 3785 } 3786 3787 static int 3788 mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, 3789 struct bpf_prog *prog, struct xdp_buff *xdp, 3790 struct page_pool *pp, struct mvpp2_pcpu_stats *stats) 3791 { 3792 unsigned int len, sync, err; 3793 struct page *page; 3794 u32 ret, act; 3795 3796 len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; 3797 act = bpf_prog_run_xdp(prog, xdp); 3798 3799 /* Due to xdp_adjust_tail: the DMA sync for_device must cover the max len the CPU touched */ 3800 sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; 3801 sync = max(sync, len); 3802 3803 switch (act) { 3804 case XDP_PASS: 3805 stats->xdp_pass++; 3806 ret = MVPP2_XDP_PASS; 3807 break; 3808 case XDP_REDIRECT: 3809 err = xdp_do_redirect(port->dev, xdp, prog); 3810 if (unlikely(err)) { 3811 ret = MVPP2_XDP_DROPPED; 3812 page = virt_to_head_page(xdp->data); 3813 page_pool_put_page(pp,
static int
mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
	      struct bpf_prog *prog, struct xdp_buff *xdp,
	      struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
{
	unsigned int len, sync, err;
	struct page *page;
	u32 ret, act;

	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail: the DMA sync for_device must cover the
	 * maximum length the CPU may have touched.
	 */
	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		stats->xdp_pass++;
		ret = MVPP2_XDP_PASS;
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(port->dev, xdp, prog);
		if (unlikely(err)) {
			ret = MVPP2_XDP_DROPPED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		} else {
			ret = MVPP2_XDP_REDIR;
			stats->xdp_redirect++;
		}
		break;
	case XDP_TX:
		ret = mvpp2_xdp_xmit_back(port, xdp);
		if (ret != MVPP2_XDP_TX) {
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(port->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(pp, page, sync, true);
		ret = MVPP2_XDP_DROPPED;
		stats->xdp_drop++;
		break;
	}

	return ret;
}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	struct mvpp2_pcpu_stats ps = {};
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int rx_received;
	int rx_done = 0;
	u32 xdp_ret = 0;

	rcu_read_lock();

	xdp_prog = READ_ONCE(port->xdp_prog);

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct page_pool *pp = NULL;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status, timestamp;
		int pool, rx_bytes, err, ret;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * contained in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
			goto err_drop_frame;

		if (port->priv->percpu_pools) {
			pp = port->priv->page_pool[pool];
			dma_dir = page_pool_get_dma_dir(pp);
		} else {
			dma_dir = DMA_FROM_DEVICE;
		}

		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
					rx_bytes + MVPP2_MH_SIZE,
					dma_dir);

		/* Prefetch header */
		prefetch(data);

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		if (xdp_prog) {
			struct xdp_rxq_info *xdp_rxq;

			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
				xdp_rxq = &rxq->xdp_rxq_short;
			else
				xdp_rxq = &rxq->xdp_rxq_long;

			xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
			xdp_prepare_buff(&xdp, data,
					 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
					 rx_bytes, false);

			ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);

			if (ret) {
				xdp_ret |= ret;
				err = mvpp2_rx_refill(port, bm_pool, pp, pool);
				if (err) {
					netdev_err(port->dev, "failed to refill BM pools\n");
					goto err_drop_frame;
				}

				ps.rx_packets++;
				ps.rx_bytes += rx_bytes;
				continue;
			}
		}
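
		/* A frag_size of zero tells build_skb() that the buffer was
		 * allocated with kmalloc() rather than from a page fragment.
		 */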
		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		/* If we have RX hardware timestamping enabled, grab the
		 * timestamp from the queue and convert.
		 */
		if (mvpp22_rx_hwtstamping(port)) {
			timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
			mvpp22_tai_tstamp(port->priv->tai, timestamp,
					  skb_hwtstamps(skb));
		}

		err = mvpp2_rx_refill(port, bm_pool, pp, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			dev_kfree_skb_any(skb);
			goto err_drop_frame;
		}

		if (pp)
			page_pool_release_page(pp, virt_to_page(data));
		else
			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
					       bm_pool->buf_size, DMA_FROM_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);

		ps.rx_packets++;
		ps.rx_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
		continue;

err_drop_frame:
		dev->stats.rx_errors++;
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
	}

	rcu_read_unlock();

	if (xdp_ret & MVPP2_XDP_REDIR)
		xdp_do_flush_map();

	if (ps.rx_packets) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += ps.rx_packets;
		stats->rx_bytes += ps.rx_bytes;
		/* xdp */
		stats->xdp_redirect += ps.xdp_redirect;
		stats->xdp_pass += ps.xdp_pass;
		stats->xdp_drop += ps.xdp_drop;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}
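
/* Unmap the buffer attached to a TX descriptor and release the
 * descriptor. TSO headers live in the per-CPU tso_headers region, which
 * stays mapped for the queue's lifetime, so they are skipped here.
 */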
static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
				   struct mvpp2_tx_desc *desc)
{
	/* We only need to clear the low bits */
	if (port->priv->hw_version >= MVPP22)
		desc->pp22.ptp_descriptor &=
			cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
}

static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
			       struct mvpp2_tx_desc *tx_desc,
			       struct sk_buff *skb)
{
	struct mvpp2_hwtstamp_queue *queue;
	unsigned int mtype, type, i;
	struct ptp_header *hdr;
	u64 ptpdesc;

	if (port->priv->hw_version == MVPP21 ||
	    port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
		return false;

	type = ptp_classify_raw(skb);
	if (!type)
		return false;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return false;

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
		  MVPP22_PTP_ACTION_CAPTURE;
	queue = &port->tx_hwtstamp_queue[0];

	switch (type & PTP_CLASS_VMASK) {
	case PTP_CLASS_V1:
		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
		break;

	case PTP_CLASS_V2:
		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
		mtype = hdr->tsmt & 15;
		/* Direct PTP Sync messages to queue 1 */
		if (mtype == 0) {
			ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
			queue = &port->tx_hwtstamp_queue[1];
		}
		break;
	}

	/* Take a reference on the skb and insert into our queue */
	i = queue->next;
	queue->next = (i + 1) & 31;
	if (queue->skb[i])
		dev_kfree_skb_any(queue->skb[i]);
	queue->skb[i] = skb_get(skb);

	ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);

	/*
	 * 3:0		- PTPAction
	 * 6:4		- PTPPacketFormat
	 * 7		- PTP_CF_WraparoundCheckEn
	 * 9:8		- IngressTimestampSeconds[1:0]
	 * 10		- Reserved
	 * 11		- MACTimestampingEn
	 * 17:12	- PTP_TimestampQueueEntryID[5:0]
	 * 18		- PTPTimestampQueueSelect
	 * 19		- UDPChecksumUpdateEn
	 * 27:20	- TimestampOffset
	 *			PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
	 *			NTPTs, Y.1731 - L3 to timestamp entry
	 * 35:28	- UDP Checksum Offset
	 *
	 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
	 */
	tx_desc->pp22.ptp_descriptor &=
		cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
	tx_desc->pp22.ptp_descriptor |=
		cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
	tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
	tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);

	return true;
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_clear_ptp(port, tx_desc);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
		}
	}

	return 0;
cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
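
/* Build a descriptor for a TSO header. Headers are written into the
 * per-CPU tso_headers buffer, so the descriptor points into that
 * pre-mapped region rather than into the skb itself.
 */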
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_clear_ptp(port, tx_desc);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
}

static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_clear_ptp(port, tx_desc);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
	return 0;
}
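
/* Segment an skb in software: queue one header descriptor plus the needed
 * data descriptors per segment. Returns the number of descriptors
 * consumed, or 0 when there is not enough room or a mapping fails, in
 * which case the caller drops the skb.
 */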
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int hdr_sz, i, len, descs = 0;
	struct tso_t tso;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	hdr_sz = tso_start(skb, &tso);

	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}

/* Main tx processing */
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	unsigned long flags = 0;
	unsigned int thread;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	aggr_txq = &port->priv->aggr_txqs[thread];

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->tx_lock[thread], flags);

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
	    !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
		mvpp2_txdesc_clear_ptp(port, tx_desc);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);

		if (!port_pcpu->timer_scheduled) {
			port_pcpu->timer_scheduled = true;
			hrtimer_start(&port_pcpu->tx_done_timer,
				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
				      HRTIMER_MODE_REL_PINNED_SOFT);
		}
	}

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->tx_lock[thread], flags);

	return NETDEV_TX_OK;
}

static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_thread_write(port->priv, thread,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx &
		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}

static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
	u32 ctrl3;

	/* Set the GMAC & XLG MAC in reset */
	mvpp2_mac_reset_assert(port);

	/* Set the MPCS and XPCS in reset */
	mvpp22_pcs_reset_assert(port);

	/* comphy reconfiguration */
	mvpp22_comphy_init(port);

	/* gop reconfiguration */
	mvpp22_gop_init(port);

	mvpp22_pcs_reset_deassert(port);

	if (mvpp2_port_supports_xlg(port)) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mvpp2_is_xlg(port->phy_interface))
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all threads */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version >= MVPP22)
		mvpp22_mode_reconfigure(port);

	if (port->phylink) {
		phylink_start(port->phylink);
	} else {
		mvpp2_acpi_start(port);
	}

	netif_tx_start_all_queues(port->dev);

	clear_bit(0, &port->state);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	set_bit(0, &port->state);

	/* Disable interrupts on all threads */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}
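
/* Clamp and align the requested ring sizes to what the hardware accepts:
 * Rx rings are rounded to a multiple of 16 and Tx rings to a multiple of
 * 32, and the Tx ring must leave room for the worst-case number of
 * descriptors a single TSO skb can consume (MVPP2_MAX_SKB_DESCS).
 */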
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (ring->rx_pending < MSS_THRESHOLD_START)
		new_rx_pending = MSS_THRESHOLD_START;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}

static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!qv->mask) {
				err = -ENOMEM;
				goto err;
			}

			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
		}

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			unsigned int cpu;

			for_each_present_cpu(cpu) {
				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
				    qv->sw_thread_id)
					cpumask_set_cpu(cpu, qv->mask);
			}

			irq_set_affinity_hint(qv->irq, qv->mask);
		}
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		free_irq(qv->irq, qv);
	}

	return err;
}

static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}
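
/* RSS is only available when the Rx queues are distributed across CPUs
 * (multi-queue mode) and the port is not a loopback port.
 */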
static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{
	return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
		!(port->flags & MVPP2_F_LOOPBACK);
}

static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* Phylink isn't supported yet in ACPI mode */
	if (port->of_node) {
		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version >= MVPP22 && port->port_irq) {
		err = request_irq(port->port_irq, mvpp2_port_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev,
				   "cannot request port link/ptp IRQ %d\n",
				   port->port_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* The link is down by default */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->port_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		err = -ENOENT;
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int thread;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all threads */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->port_irq)
		free_irq(port->port_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < port->priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	return 0;
}

static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}

static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}
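
/* Program the parser with the current UC/MC filter lists. If a list does
 * not fit in the parser's per-port filter entries, fall back to unicast
 * or multicast promiscuous mode for that traffic class.
 */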
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		/* Reconfigure the parser to accept the original MAC address */
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}
	return err;
}

/* Shut down all the ports, reconfigure the pools as percpu or shared,
 * then bring all ports up again.
 */
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{
	bool change_percpu = (percpu != priv->percpu_pools);
	int numbufs = MVPP2_BM_POOLS_NUM, i;
	struct mvpp2_port *port = NULL;
	bool status[MVPP2_MAX_PORTS];

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		status[i] = netif_running(port->dev);
		if (status[i])
			mvpp2_stop(port->dev);
	}

	/* nrxqs is the same for all ports */
	if (priv->percpu_pools)
		numbufs = port->nrxqs * 2;

	if (change_percpu)
		mvpp2_bm_pool_update_priv_fc(priv, false);

	for (i = 0; i < numbufs; i++)
		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);

	devm_kfree(port->dev->dev.parent, priv->bm_pools);
	priv->percpu_pools = percpu;
	mvpp2_bm_init(port->dev->dev.parent, priv);

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		mvpp2_swf_bm_pool_init(port);
		if (status[i])
			mvpp2_open(port->dev);
	}

	if (change_percpu)
		mvpp2_bm_pool_update_priv_fc(priv, true);

	return 0;
}

static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	bool running = netif_running(dev);
	struct mvpp2 *priv = port->priv;
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
		if (port->xdp_prog) {
			netdev_err(dev, "Jumbo frames are not supported with XDP\n");
			return -EINVAL;
		}
		if (priv->percpu_pools) {
			netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
			mvpp2_bm_switch_buffers(priv, false);
		}
	} else {
		bool jumbo = false;
		int i;

		for (i = 0; i < priv->port_count; i++)
			if (priv->port_list[i] != port &&
			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
			    MVPP2_BM_LONG_PKT_SIZE) {
				jumbo = true;
				break;
			}

		/* No port is using jumbo frames */
		if (!jumbo) {
			dev_info(port->dev->dev.parent,
				 "all ports have a low MTU, switching to per-cpu buffers");
			mvpp2_bm_switch_buffers(priv, true);
		}
	}

	if (running)
		mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (err) {
		netdev_err(dev, "failed to change MTU\n");
		/* Reconfigure BM to the original MTU */
		mvpp2_bm_update_mtu(dev, dev->mtu);
	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
	}

	if (running) {
		mvpp2_start_dev(port);
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return err;
}

static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	struct mvpp2 *priv = port->priv;
	int err = -1, i;

	if (!priv->percpu_pools)
		return err;

	if (!priv->page_pool[0])
		return -ENOMEM;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->xdp_prog) {
			dma_dir = DMA_BIDIRECTIONAL;
			break;
		}
	}

	/* All pools are equal in terms of DMA direction */
	if (priv->page_pool[0]->p.dma_dir != dma_dir)
		err = mvpp2_bm_switch_buffers(priv, true);

	return err;
}
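
/* Fold the per-CPU counters into the netdev stats. The reader side uses
 * the u64_stats seqcount retry loop so that the 64-bit counters read
 * consistently even on 32-bit systems.
 */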
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	void __iomem *ptp;
	u32 gcr, int_mask;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);

	int_mask = gcr = 0;
	if (config.tx_type != HWTSTAMP_TX_OFF) {
		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
		int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
			    MVPP22_PTP_INT_MASK_QUEUE0;
	}

	/* It seems we must also release the TX reset when enabling the TSU */
	if (config.rx_filter != HWTSTAMP_FILTER_NONE)
		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
		       MVPP22_PTP_GCR_TX_RESET;

	if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
		mvpp22_tai_start(port->priv->tai);

	if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		mvpp2_modify(ptp + MVPP22_PTP_GCR,
			     MVPP22_PTP_GCR_RX_RESET |
			     MVPP22_PTP_GCR_TX_RESET |
			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
		port->rx_hwtstamp = true;
	} else {
		port->rx_hwtstamp = false;
		mvpp2_modify(ptp + MVPP22_PTP_GCR,
			     MVPP22_PTP_GCR_RX_RESET |
			     MVPP22_PTP_GCR_TX_RESET |
			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
	}

	mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
		     MVPP22_PTP_INT_MASK_QUEUE1 |
		     MVPP22_PTP_INT_MASK_QUEUE0, int_mask);

	if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
		mvpp22_tai_stop(port->priv->tai);

	port->tx_hwtstamp_type = config.tx_type;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	memset(&config, 0, sizeof(config));

	config.tx_type = port->tx_hwtstamp_type;
	config.rx_filter = port->rx_hwtstamp ?
		HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
				     struct ethtool_ts_info *info)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->hwtstamp)
		return -EOPNOTSUPP;

	info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
			 BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (port->hwtstamp)
			return mvpp2_set_ts_config(port, ifr);
		break;

	case SIOCGHWTSTAMP:
		if (port->hwtstamp)
			return mvpp2_get_ts_config(port, ifr);
		break;
	}

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_mii_ioctl(port->phylink, ifr, cmd);
}

static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}

static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}

static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct mvpp2_port *port = netdev_priv(dev);

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port
			 */
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	if (changed & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			mvpp22_port_rss_enable(port);
		else
			mvpp22_port_rss_disable(port);
	}

	return 0;
}
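
/* Attach or detach an XDP program. XDP requires per-CPU buffer pools, a
 * standard (non-jumbo) MTU, and enough TX queues to dedicate half of
 * them to XDP_TX, i.e. two per CPU.
 */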
static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog, *old_prog;
	bool running = netif_running(port->dev);
	bool reset = !prog != !port->xdp_prog;

	if (port->dev->mtu > ETH_DATA_LEN) {
		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
		return -EOPNOTSUPP;
	}

	if (!port->priv->percpu_pools) {
		NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
		return -EOPNOTSUPP;
	}

	if (port->ntxqs < num_possible_cpus() * 2) {
		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
		return -EOPNOTSUPP;
	}

	/* device is up and bpf is added/removed, must setup the RX queues */
	if (running && reset)
		mvpp2_stop(port->dev);

	old_prog = xchg(&port->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	/* bpf is just replaced, RXQ and MTU are already setup */
	if (!reset)
		return 0;

	/* device was up, restore the link */
	if (running)
		mvpp2_open(port->dev);

	/* Check Page Pool DMA Direction */
	mvpp2_check_pagepool_dma(port);

	return 0;
}

static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct mvpp2_port *port = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mvpp2_xdp_setup(port, xdp);
	default:
		return -EINVAL;
	}
}

/* Ethtool methods */

static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(port->phylink);
}

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	c->tx_coalesce_usecs = port->tx_time_coal;
	return 0;
}

static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}

static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}

static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return;

	phylink_ethtool_get_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_set_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_get(port->phylink, cmd);
}

static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(port->phylink, cmd);
}

static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0, i, loc = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXFH:
		ret = mvpp2_ethtool_rxfh_get(port, info);
		break;
	case ETHTOOL_GRXRINGS:
		info->data = port->nrxqs;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = port->n_rfs_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = mvpp2_ethtool_cls_rule_get(port, info);
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
			if (port->rfs_rules[i])
				rules[loc++] = i;
		}
		break;
	default:
		return -ENOTSUPP;
	}

	return ret;
}

static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		ret = mvpp2_ethtool_rxfh_set(port, info);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = mvpp2_ethtool_cls_rule_ins(port, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = mvpp2_ethtool_cls_rule_del(port, info);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}

static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				  u8 *hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;

	if (indir)
		ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	return ret;
}

static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				  const u8 *key, const u8 hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (indir)
		ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);

	return ret;
}

static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
					  u8 *key, u8 *hfunc, u32 rss_context)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;
	if (rss_context >= MVPP22_N_RSS_TABLES)
		return -EINVAL;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	if (indir)
		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);

	return ret;
}

static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
					  const u32 *indir, const u8 *key,
					  const u8 hfunc, u32 *rss_context,
					  bool delete)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (delete)
		return mvpp22_port_rss_ctx_delete(port, *rss_context);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		ret = mvpp22_port_rss_ctx_create(port, rss_context);
		if (ret)
			return ret;
	}

	return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
}

/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
	.ndo_set_features	= mvpp2_set_features,
	.ndo_bpf		= mvpp2_xdp,
	.ndo_xdp_xmit		= mvpp2_xdp_xmit,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset		= mvpp2_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= mvpp2_ethtool_get_ts_info,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
	.get_rxfh		= mvpp2_ethtool_get_rxfh,
	.set_rxfh		= mvpp2_ethtool_set_rxfh,
	.get_rxfh_context	= mvpp2_ethtool_get_rxfh_context,
	.set_rxfh_context	= mvpp2_ethtool_set_rxfh_context,
};

/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}

static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_queue_vector *v;
	int i, ret;

	switch (queue_mode) {
	case MVPP2_QDIST_SINGLE_MODE:
		port->nqvecs = priv->nthreads + 1;
		break;
	case MVPP2_QDIST_MULTI_MODE:
		port->nqvecs = priv->nthreads;
		break;
	}

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		if (port->flags & MVPP2_F_DT_COMPAT)
			snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
		else
			snprintf(irqname, sizeof(irqname), "hif%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i;
			v->nrxqs = 1;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;

			if (port->flags & MVPP2_F_DT_COMPAT)
				strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
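
/* Use the multi-vector init when per-CPU TX interrupts are available
 * ("hifX" or legacy "tx-cpuX" interrupts), otherwise fall back to a
 * single shared vector.
 */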
*port_node) 5802 { 5803 if (port->has_tx_irqs) 5804 return mvpp2_multi_queue_vectors_init(port, port_node); 5805 else 5806 return mvpp2_simple_queue_vectors_init(port, port_node); 5807 } 5808 5809 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) 5810 { 5811 int i; 5812 5813 for (i = 0; i < port->nqvecs; i++) 5814 irq_dispose_mapping(port->qvecs[i].irq); 5815 } 5816 5817 /* Configure Rx queue group interrupt for this port */ 5818 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) 5819 { 5820 struct mvpp2 *priv = port->priv; 5821 u32 val; 5822 int i; 5823 5824 if (priv->hw_version == MVPP21) { 5825 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), 5826 port->nrxqs); 5827 return; 5828 } 5829 5830 /* Handle the more complicated PPv2.2 and PPv2.3 case */ 5831 for (i = 0; i < port->nqvecs; i++) { 5832 struct mvpp2_queue_vector *qv = port->qvecs + i; 5833 5834 if (!qv->nrxqs) 5835 continue; 5836 5837 val = qv->sw_thread_id; 5838 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; 5839 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 5840 5841 val = qv->first_rxq; 5842 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; 5843 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 5844 } 5845 } 5846 5847 /* Initialize port HW */ 5848 static int mvpp2_port_init(struct mvpp2_port *port) 5849 { 5850 struct device *dev = port->dev->dev.parent; 5851 struct mvpp2 *priv = port->priv; 5852 struct mvpp2_txq_pcpu *txq_pcpu; 5853 unsigned int thread; 5854 int queue, err, val; 5855 5856 /* Checks for hardware constraints */ 5857 if (port->first_rxq + port->nrxqs > 5858 MVPP2_MAX_PORTS * priv->max_port_rxqs) 5859 return -EINVAL; 5860 5861 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) 5862 return -EINVAL; 5863 5864 /* Disable port */ 5865 mvpp2_egress_disable(port); 5866 mvpp2_port_disable(port); 5867 5868 if (mvpp2_is_xlg(port->phy_interface)) { 5869 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 5870 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 5871 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 5872 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 5873 } else { 5874 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5875 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 5876 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 5877 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 5878 } 5879 5880 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; 5881 5882 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), 5883 GFP_KERNEL); 5884 if (!port->txqs) 5885 return -ENOMEM; 5886 5887 /* Associate physical Tx queues to this port and initialize. 5888 * The mapping is predefined. 
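 * (Each logical Tx queue N of port P is mapped onto the fixed physical
 * queue returned by mvpp2_txq_phys(P, N), as computed in the loop below.)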
5889 */
5890 for (queue = 0; queue < port->ntxqs; queue++) {
5891 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5892 struct mvpp2_tx_queue *txq;
5893
5894 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5895 if (!txq) {
5896 err = -ENOMEM;
5897 goto err_free_percpu;
5898 }
5899
5900 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5901 if (!txq->pcpu) {
5902 err = -ENOMEM;
5903 goto err_free_percpu;
5904 }
5905
5906 txq->id = queue_phy_id;
5907 txq->log_id = queue;
5908 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5909 for (thread = 0; thread < priv->nthreads; thread++) {
5910 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5911 txq_pcpu->thread = thread;
5912 }
5913
5914 port->txqs[queue] = txq;
5915 }
5916
5917 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5918 GFP_KERNEL);
5919 if (!port->rxqs) {
5920 err = -ENOMEM;
5921 goto err_free_percpu;
5922 }
5923
5924 /* Allocate and initialize Rx queue for this port */
5925 for (queue = 0; queue < port->nrxqs; queue++) {
5926 struct mvpp2_rx_queue *rxq;
5927
5928 /* Map physical Rx queue to port's logical Rx queue */
5929 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5930 if (!rxq) {
5931 err = -ENOMEM;
5932 goto err_free_percpu;
5933 }
5934 /* Map this Rx queue to a physical queue */
5935 rxq->id = port->first_rxq + queue;
5936 rxq->port = port->id;
5937 rxq->logic_rxq = queue;
5938
5939 port->rxqs[queue] = rxq;
5940 }
5941
5942 mvpp2_rx_irqs_setup(port);
5943
5944 /* Create Rx descriptor rings */
5945 for (queue = 0; queue < port->nrxqs; queue++) {
5946 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5947
5948 rxq->size = port->rx_ring_size;
5949 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5950 rxq->time_coal = MVPP2_RX_COAL_USEC;
5951 }
5952
5953 mvpp2_ingress_disable(port);
5954
5955 /* Port default configuration */
5956 mvpp2_defaults_set(port);
5957
5958 /* Port's classifier configuration */
5959 mvpp2_cls_oversize_rxq_set(port);
5960 mvpp2_cls_port_config(port);
5961
5962 if (mvpp22_rss_is_supported(port))
5963 mvpp22_port_rss_init(port);
5964
5965 /* Provide an initial Rx packet size */
5966 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
5967
5968 /* Initialize pools for software forwarding (swf) */
5969 err = mvpp2_swf_bm_pool_init(port);
5970 if (err)
5971 goto err_free_percpu;
5972
5973 /* Clear all port stats */
5974 mvpp2_read_stats(port);
5975 memset(port->ethtool_stats, 0,
5976 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
5977
5978 return 0;
5979
5980 err_free_percpu:
5981 for (queue = 0; queue < port->ntxqs; queue++) {
5982 if (!port->txqs[queue])
5983 continue;
5984 free_percpu(port->txqs[queue]->pcpu);
5985 }
5986 return err;
5987 }
5988
5989 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
5990 unsigned long *flags)
5991 {
5992 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
5993 "tx-cpu3" };
5994 int i;
5995
5996 for (i = 0; i < 5; i++)
5997 if (of_property_match_string(port_node, "interrupt-names",
5998 irqs[i]) < 0)
5999 return false;
6000
6001 *flags |= MVPP2_F_DT_COMPAT;
6002 return true;
6003 }
6004
6005 /* Check whether the port DT description has the required Tx interrupts:
6006 * - PPv2.1: there are no such interrupts.
6007 * - PPv2.2 and PPv2.3:
6008 * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
6009 * - The new ones have: "hifX" with X in [0..8]
6010 *
6011 * All these variants are supported to keep backward compatibility.
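 * When the legacy names are found, mvpp22_port_has_legacy_tx_irqs() also
 * sets MVPP2_F_DT_COMPAT, so the queue vector code requests the IRQs under
 * their old names (see mvpp2_multi_queue_vectors_init() above).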
6012 */ 6013 static bool mvpp2_port_has_irqs(struct mvpp2 *priv, 6014 struct device_node *port_node, 6015 unsigned long *flags) 6016 { 6017 char name[5]; 6018 int i; 6019 6020 /* ACPI */ 6021 if (!port_node) 6022 return true; 6023 6024 if (priv->hw_version == MVPP21) 6025 return false; 6026 6027 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) 6028 return true; 6029 6030 for (i = 0; i < MVPP2_MAX_THREADS; i++) { 6031 snprintf(name, 5, "hif%d", i); 6032 if (of_property_match_string(port_node, "interrupt-names", 6033 name) < 0) 6034 return false; 6035 } 6036 6037 return true; 6038 } 6039 6040 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, 6041 struct fwnode_handle *fwnode, 6042 char **mac_from) 6043 { 6044 struct mvpp2_port *port = netdev_priv(dev); 6045 char hw_mac_addr[ETH_ALEN] = {0}; 6046 char fw_mac_addr[ETH_ALEN]; 6047 6048 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { 6049 *mac_from = "firmware node"; 6050 ether_addr_copy(dev->dev_addr, fw_mac_addr); 6051 return; 6052 } 6053 6054 if (priv->hw_version == MVPP21) { 6055 mvpp21_get_mac_address(port, hw_mac_addr); 6056 if (is_valid_ether_addr(hw_mac_addr)) { 6057 *mac_from = "hardware"; 6058 ether_addr_copy(dev->dev_addr, hw_mac_addr); 6059 return; 6060 } 6061 } 6062 6063 *mac_from = "random"; 6064 eth_hw_addr_random(dev); 6065 } 6066 6067 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) 6068 { 6069 return container_of(config, struct mvpp2_port, phylink_config); 6070 } 6071 6072 static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs) 6073 { 6074 return container_of(pcs, struct mvpp2_port, phylink_pcs); 6075 } 6076 6077 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, 6078 struct phylink_link_state *state) 6079 { 6080 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); 6081 u32 val; 6082 6083 state->speed = SPEED_10000; 6084 state->duplex = 1; 6085 state->an_complete = 1; 6086 6087 val = readl(port->base + MVPP22_XLG_STATUS); 6088 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); 6089 6090 state->pause = 0; 6091 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6092 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) 6093 state->pause |= MLO_PAUSE_TX; 6094 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) 6095 state->pause |= MLO_PAUSE_RX; 6096 } 6097 6098 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, 6099 unsigned int mode, 6100 phy_interface_t interface, 6101 const unsigned long *advertising, 6102 bool permit_pause_to_mac) 6103 { 6104 return 0; 6105 } 6106 6107 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { 6108 .pcs_get_state = mvpp2_xlg_pcs_get_state, 6109 .pcs_config = mvpp2_xlg_pcs_config, 6110 }; 6111 6112 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, 6113 struct phylink_link_state *state) 6114 { 6115 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); 6116 u32 val; 6117 6118 val = readl(port->base + MVPP2_GMAC_STATUS0); 6119 6120 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); 6121 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); 6122 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); 6123 6124 switch (port->phy_interface) { 6125 case PHY_INTERFACE_MODE_1000BASEX: 6126 state->speed = SPEED_1000; 6127 break; 6128 case PHY_INTERFACE_MODE_2500BASEX: 6129 state->speed = SPEED_2500; 6130 break; 6131 default: 6132 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) 6133 state->speed = SPEED_1000; 6134 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) 6135 state->speed = SPEED_100; 6136 else 6137 
state->speed = SPEED_10;
6138 }
6139
6140 state->pause = 0;
6141 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
6142 state->pause |= MLO_PAUSE_RX;
6143 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
6144 state->pause |= MLO_PAUSE_TX;
6145 }
6146
6147 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
6148 phy_interface_t interface,
6149 const unsigned long *advertising,
6150 bool permit_pause_to_mac)
6151 {
6152 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
6153 u32 mask, val, an, old_an, changed;
6154
6155 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
6156 MVPP2_GMAC_IN_BAND_AUTONEG |
6157 MVPP2_GMAC_AN_SPEED_EN |
6158 MVPP2_GMAC_FLOW_CTRL_AUTONEG |
6159 MVPP2_GMAC_AN_DUPLEX_EN;
6160
6161 if (phylink_autoneg_inband(mode)) {
6162 mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
6163 MVPP2_GMAC_CONFIG_GMII_SPEED |
6164 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6165 val = MVPP2_GMAC_IN_BAND_AUTONEG;
6166
6167 if (interface == PHY_INTERFACE_MODE_SGMII) {
6168 /* SGMII mode receives the speed and duplex from PHY */
6169 val |= MVPP2_GMAC_AN_SPEED_EN |
6170 MVPP2_GMAC_AN_DUPLEX_EN;
6171 } else {
6172 /* 802.3z mode has fixed speed and duplex */
6173 val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
6174 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6175
6176 /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC
6177 * pause modes are controlled automatically by hardware
6178 * or manually via the bits in MVPP22_GMAC_CTRL_4_REG.
6179 */
6180 if (permit_pause_to_mac)
6181 val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
6182
6183 /* Configure advertisement bits */
6184 mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
6185 if (phylink_test(advertising, Pause))
6186 val |= MVPP2_GMAC_FC_ADV_EN;
6187 if (phylink_test(advertising, Asym_Pause))
6188 val |= MVPP2_GMAC_FC_ADV_ASM_EN;
6189 }
6190 } else {
6191 val = 0;
6192 }
6193
6194 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6195 an = (an & ~mask) | val;
6196 changed = an ^ old_an;
6197 if (changed)
6198 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6199
6200 /* We are only interested in the advertisement bits changing */
6201 return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
6202 }
6203
6204 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
6205 {
6206 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
6207 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6208
6209 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6210 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6211 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
6212 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6213 }
6214
6215 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
6216 .pcs_get_state = mvpp2_gmac_pcs_get_state,
6217 .pcs_config = mvpp2_gmac_pcs_config,
6218 .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
6219 };
6220
6221 static void mvpp2_phylink_validate(struct phylink_config *config,
6222 unsigned long *supported,
6223 struct phylink_link_state *state)
6224 {
6225 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6226 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
6227
6228 /* Invalid combinations */
6229 switch (state->interface) {
6230 case PHY_INTERFACE_MODE_10GBASER:
6231 case PHY_INTERFACE_MODE_XAUI:
6232 if (!mvpp2_port_supports_xlg(port))
6233 goto empty_set;
6234 break;
6235 case PHY_INTERFACE_MODE_RGMII:
6236 case PHY_INTERFACE_MODE_RGMII_ID:
6237 case PHY_INTERFACE_MODE_RGMII_RXID:
6238 case PHY_INTERFACE_MODE_RGMII_TXID:
6239 if (!mvpp2_port_supports_rgmii(port))
6240 goto empty_set;
6241 break;
6242 default:
6243 break;
6244 }
6245
6246 phylink_set(mask,
Autoneg); 6247 phylink_set_port_modes(mask); 6248 6249 if (port->priv->global_tx_fc) { 6250 phylink_set(mask, Pause); 6251 phylink_set(mask, Asym_Pause); 6252 } 6253 6254 switch (state->interface) { 6255 case PHY_INTERFACE_MODE_10GBASER: 6256 case PHY_INTERFACE_MODE_XAUI: 6257 case PHY_INTERFACE_MODE_NA: 6258 if (mvpp2_port_supports_xlg(port)) { 6259 phylink_set(mask, 10000baseT_Full); 6260 phylink_set(mask, 10000baseCR_Full); 6261 phylink_set(mask, 10000baseSR_Full); 6262 phylink_set(mask, 10000baseLR_Full); 6263 phylink_set(mask, 10000baseLRM_Full); 6264 phylink_set(mask, 10000baseER_Full); 6265 phylink_set(mask, 10000baseKR_Full); 6266 } 6267 if (state->interface != PHY_INTERFACE_MODE_NA) 6268 break; 6269 fallthrough; 6270 case PHY_INTERFACE_MODE_RGMII: 6271 case PHY_INTERFACE_MODE_RGMII_ID: 6272 case PHY_INTERFACE_MODE_RGMII_RXID: 6273 case PHY_INTERFACE_MODE_RGMII_TXID: 6274 case PHY_INTERFACE_MODE_SGMII: 6275 phylink_set(mask, 10baseT_Half); 6276 phylink_set(mask, 10baseT_Full); 6277 phylink_set(mask, 100baseT_Half); 6278 phylink_set(mask, 100baseT_Full); 6279 phylink_set(mask, 1000baseT_Full); 6280 phylink_set(mask, 1000baseX_Full); 6281 if (state->interface != PHY_INTERFACE_MODE_NA) 6282 break; 6283 fallthrough; 6284 case PHY_INTERFACE_MODE_1000BASEX: 6285 case PHY_INTERFACE_MODE_2500BASEX: 6286 if (port->comphy || 6287 state->interface != PHY_INTERFACE_MODE_2500BASEX) { 6288 phylink_set(mask, 1000baseT_Full); 6289 phylink_set(mask, 1000baseX_Full); 6290 } 6291 if (port->comphy || 6292 state->interface == PHY_INTERFACE_MODE_2500BASEX) { 6293 phylink_set(mask, 2500baseT_Full); 6294 phylink_set(mask, 2500baseX_Full); 6295 } 6296 break; 6297 default: 6298 goto empty_set; 6299 } 6300 6301 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); 6302 bitmap_and(state->advertising, state->advertising, mask, 6303 __ETHTOOL_LINK_MODE_MASK_NBITS); 6304 6305 phylink_helper_basex_speed(state); 6306 return; 6307 6308 empty_set: 6309 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 6310 } 6311 6312 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, 6313 const struct phylink_link_state *state) 6314 { 6315 u32 val; 6316 6317 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6318 MVPP22_XLG_CTRL0_MAC_RESET_DIS, 6319 MVPP22_XLG_CTRL0_MAC_RESET_DIS); 6320 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, 6321 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | 6322 MVPP22_XLG_CTRL4_EN_IDLE_CHECK | 6323 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, 6324 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); 6325 6326 /* Wait for reset to deassert */ 6327 do { 6328 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6329 } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); 6330 } 6331 6332 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, 6333 const struct phylink_link_state *state) 6334 { 6335 u32 old_ctrl0, ctrl0; 6336 u32 old_ctrl2, ctrl2; 6337 u32 old_ctrl4, ctrl4; 6338 6339 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 6340 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 6341 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); 6342 6343 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; 6344 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK); 6345 6346 /* Configure port type */ 6347 if (phy_interface_mode_is_8023z(state->interface)) { 6348 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; 6349 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 6350 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | 6351 
MVPP22_CTRL4_DP_CLK_SEL | 6352 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 6353 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 6354 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; 6355 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; 6356 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | 6357 MVPP22_CTRL4_DP_CLK_SEL | 6358 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 6359 } else if (phy_interface_mode_is_rgmii(state->interface)) { 6360 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; 6361 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | 6362 MVPP22_CTRL4_SYNC_BYPASS_DIS | 6363 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; 6364 } 6365 6366 /* Configure negotiation style */ 6367 if (!phylink_autoneg_inband(mode)) { 6368 /* Phy or fixed speed - no in-band AN, nothing to do, leave the 6369 * configured speed, duplex and flow control as-is. 6370 */ 6371 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { 6372 /* SGMII in-band mode receives the speed and duplex from 6373 * the PHY. Flow control information is not received. */ 6374 } else if (phy_interface_mode_is_8023z(state->interface)) { 6375 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can 6376 * they negotiate duplex: they are always operating with a fixed 6377 * speed of 1000/2500Mbps in full duplex, so force 1000/2500 6378 * speed and full duplex here. 6379 */ 6380 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; 6381 } 6382 6383 if (old_ctrl0 != ctrl0) 6384 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); 6385 if (old_ctrl2 != ctrl2) 6386 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); 6387 if (old_ctrl4 != ctrl4) 6388 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); 6389 } 6390 6391 static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, 6392 phy_interface_t interface) 6393 { 6394 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6395 6396 /* Check for invalid configuration */ 6397 if (mvpp2_is_xlg(interface) && port->gop_id != 0) { 6398 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); 6399 return -EINVAL; 6400 } 6401 6402 if (port->phy_interface != interface || 6403 phylink_autoneg_inband(mode)) { 6404 /* Force the link down when changing the interface or if in 6405 * in-band mode to ensure we do not change the configuration 6406 * while the hardware is indicating link is up. We force both 6407 * XLG and GMAC down to ensure that they're both in a known 6408 * state. 6409 */ 6410 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6411 MVPP2_GMAC_FORCE_LINK_PASS | 6412 MVPP2_GMAC_FORCE_LINK_DOWN, 6413 MVPP2_GMAC_FORCE_LINK_DOWN); 6414 6415 if (mvpp2_port_supports_xlg(port)) 6416 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6417 MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6418 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 6419 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); 6420 } 6421 6422 /* Make sure the port is disabled when reconfiguring the mode */ 6423 mvpp2_port_disable(port); 6424 6425 if (port->phy_interface != interface) { 6426 /* Place GMAC into reset */ 6427 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, 6428 MVPP2_GMAC_PORT_RESET_MASK, 6429 MVPP2_GMAC_PORT_RESET_MASK); 6430 6431 if (port->priv->hw_version >= MVPP22) { 6432 mvpp22_gop_mask_irq(port); 6433 6434 phy_power_off(port->comphy); 6435 } 6436 } 6437 6438 /* Select the appropriate PCS operations depending on the 6439 * configured interface mode. We will only switch to a mode 6440 * that the validate() checks have already passed. 
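 * (XLG-only modes such as 10GBASE-R and XAUI get the XLG PCS ops; every
 * other supported mode is handled through the GMAC PCS ops.)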
6441 */ 6442 if (mvpp2_is_xlg(interface)) 6443 port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops; 6444 else 6445 port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops; 6446 6447 return 0; 6448 } 6449 6450 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, 6451 phy_interface_t interface) 6452 { 6453 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6454 int ret; 6455 6456 ret = mvpp2__mac_prepare(config, mode, interface); 6457 if (ret == 0) 6458 phylink_set_pcs(port->phylink, &port->phylink_pcs); 6459 6460 return ret; 6461 } 6462 6463 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, 6464 const struct phylink_link_state *state) 6465 { 6466 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6467 6468 /* mac (re)configuration */ 6469 if (mvpp2_is_xlg(state->interface)) 6470 mvpp2_xlg_config(port, mode, state); 6471 else if (phy_interface_mode_is_rgmii(state->interface) || 6472 phy_interface_mode_is_8023z(state->interface) || 6473 state->interface == PHY_INTERFACE_MODE_SGMII) 6474 mvpp2_gmac_config(port, mode, state); 6475 6476 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) 6477 mvpp2_port_loopback_set(port, state); 6478 } 6479 6480 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, 6481 phy_interface_t interface) 6482 { 6483 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6484 6485 if (port->priv->hw_version >= MVPP22 && 6486 port->phy_interface != interface) { 6487 port->phy_interface = interface; 6488 6489 /* Reconfigure the serdes lanes */ 6490 mvpp22_mode_reconfigure(port); 6491 6492 /* Unmask interrupts */ 6493 mvpp22_gop_unmask_irq(port); 6494 } 6495 6496 if (!mvpp2_is_xlg(interface)) { 6497 /* Release GMAC reset and wait */ 6498 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, 6499 MVPP2_GMAC_PORT_RESET_MASK, 0); 6500 6501 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 6502 MVPP2_GMAC_PORT_RESET_MASK) 6503 continue; 6504 } 6505 6506 mvpp2_port_enable(port); 6507 6508 /* Allow the link to come up if in in-band mode, otherwise the 6509 * link is forced via mac_link_down()/mac_link_up() 6510 */ 6511 if (phylink_autoneg_inband(mode)) { 6512 if (mvpp2_is_xlg(interface)) 6513 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6514 MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6515 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); 6516 else 6517 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6518 MVPP2_GMAC_FORCE_LINK_PASS | 6519 MVPP2_GMAC_FORCE_LINK_DOWN, 0); 6520 } 6521 6522 return 0; 6523 } 6524 6525 static void mvpp2_mac_link_up(struct phylink_config *config, 6526 struct phy_device *phy, 6527 unsigned int mode, phy_interface_t interface, 6528 int speed, int duplex, 6529 bool tx_pause, bool rx_pause) 6530 { 6531 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6532 u32 val; 6533 int i; 6534 6535 if (mvpp2_is_xlg(interface)) { 6536 if (!phylink_autoneg_inband(mode)) { 6537 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 6538 if (tx_pause) 6539 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; 6540 if (rx_pause) 6541 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; 6542 6543 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, 6544 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | 6545 MVPP22_XLG_CTRL0_FORCE_LINK_PASS | 6546 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | 6547 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); 6548 } 6549 } else { 6550 if (!phylink_autoneg_inband(mode)) { 6551 val = MVPP2_GMAC_FORCE_LINK_PASS; 6552 6553 if (speed == SPEED_1000 || speed == SPEED_2500) 6554 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 6555 else if 
(speed == SPEED_100) 6556 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 6557 6558 if (duplex == DUPLEX_FULL) 6559 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 6560 6561 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, 6562 MVPP2_GMAC_FORCE_LINK_DOWN | 6563 MVPP2_GMAC_FORCE_LINK_PASS | 6564 MVPP2_GMAC_CONFIG_MII_SPEED | 6565 MVPP2_GMAC_CONFIG_GMII_SPEED | 6566 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); 6567 } 6568 6569 /* We can always update the flow control enable bits; 6570 * these will only be effective if flow control AN 6571 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. 6572 */ 6573 val = 0; 6574 if (tx_pause) 6575 val |= MVPP22_CTRL4_TX_FC_EN; 6576 if (rx_pause) 6577 val |= MVPP22_CTRL4_RX_FC_EN; 6578 6579 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, 6580 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, 6581 val); 6582 } 6583 6584 if (port->priv->global_tx_fc) { 6585 port->tx_fc = tx_pause; 6586 if (tx_pause) 6587 mvpp2_rxq_enable_fc(port); 6588 else 6589 mvpp2_rxq_disable_fc(port); 6590 if (port->priv->percpu_pools) { 6591 for (i = 0; i < port->nrxqs; i++) 6592 mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause); 6593 } else { 6594 mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause); 6595 mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause); 6596 } 6597 if (port->priv->hw_version == MVPP23) 6598 mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause); 6599 } 6600 6601 mvpp2_port_enable(port); 6602 6603 mvpp2_egress_enable(port); 6604 mvpp2_ingress_enable(port); 6605 netif_tx_wake_all_queues(port->dev); 6606 } 6607 6608 static void mvpp2_mac_link_down(struct phylink_config *config, 6609 unsigned int mode, phy_interface_t interface) 6610 { 6611 struct mvpp2_port *port = mvpp2_phylink_to_port(config); 6612 u32 val; 6613 6614 if (!phylink_autoneg_inband(mode)) { 6615 if (mvpp2_is_xlg(interface)) { 6616 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 6617 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; 6618 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; 6619 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 6620 } else { 6621 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6622 val &= ~MVPP2_GMAC_FORCE_LINK_PASS; 6623 val |= MVPP2_GMAC_FORCE_LINK_DOWN; 6624 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 6625 } 6626 } 6627 6628 netif_tx_stop_all_queues(port->dev); 6629 mvpp2_egress_disable(port); 6630 mvpp2_ingress_disable(port); 6631 6632 mvpp2_port_disable(port); 6633 } 6634 6635 static const struct phylink_mac_ops mvpp2_phylink_ops = { 6636 .validate = mvpp2_phylink_validate, 6637 .mac_prepare = mvpp2_mac_prepare, 6638 .mac_config = mvpp2_mac_config, 6639 .mac_finish = mvpp2_mac_finish, 6640 .mac_link_up = mvpp2_mac_link_up, 6641 .mac_link_down = mvpp2_mac_link_down, 6642 }; 6643 6644 /* Work-around for ACPI */ 6645 static void mvpp2_acpi_start(struct mvpp2_port *port) 6646 { 6647 /* Phylink isn't used as of now for ACPI, so the MAC has to be 6648 * configured manually when the interface is started. This will 6649 * be removed as soon as the phylink ACPI support lands in. 
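 * The calls below mirror, in order, what phylink would otherwise do to
 * bring up an in-band link: mac_prepare(), mac_config(), pcs_config(),
 * mac_finish() and finally mac_link_up().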
6650 */ 6651 struct phylink_link_state state = { 6652 .interface = port->phy_interface, 6653 }; 6654 mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND, 6655 port->phy_interface); 6656 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); 6657 port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND, 6658 port->phy_interface, 6659 state.advertising, false); 6660 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, 6661 port->phy_interface); 6662 mvpp2_mac_link_up(&port->phylink_config, NULL, 6663 MLO_AN_INBAND, port->phy_interface, 6664 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); 6665 } 6666 6667 /* Ports initialization */ 6668 static int mvpp2_port_probe(struct platform_device *pdev, 6669 struct fwnode_handle *port_fwnode, 6670 struct mvpp2 *priv) 6671 { 6672 struct phy *comphy = NULL; 6673 struct mvpp2_port *port; 6674 struct mvpp2_port_pcpu *port_pcpu; 6675 struct device_node *port_node = to_of_node(port_fwnode); 6676 netdev_features_t features; 6677 struct net_device *dev; 6678 struct phylink *phylink; 6679 char *mac_from = ""; 6680 unsigned int ntxqs, nrxqs, thread; 6681 unsigned long flags = 0; 6682 bool has_tx_irqs; 6683 u32 id; 6684 int phy_mode; 6685 int err, i; 6686 6687 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); 6688 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { 6689 dev_err(&pdev->dev, 6690 "not enough IRQs to support multi queue mode\n"); 6691 return -EINVAL; 6692 } 6693 6694 ntxqs = MVPP2_MAX_TXQ; 6695 nrxqs = mvpp2_get_nrxqs(priv); 6696 6697 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); 6698 if (!dev) 6699 return -ENOMEM; 6700 6701 phy_mode = fwnode_get_phy_mode(port_fwnode); 6702 if (phy_mode < 0) { 6703 dev_err(&pdev->dev, "incorrect phy mode\n"); 6704 err = phy_mode; 6705 goto err_free_netdev; 6706 } 6707 6708 /* 6709 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. 6710 * Existing usage of 10GBASE-KR is not correct; no backplane 6711 * negotiation is done, and this driver does not actually support 6712 * 10GBASE-KR. 
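 * Rewriting the mode here keeps those older DTs working while the MAC is
 * actually configured for 10GBASE-R.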
6713 */ 6714 if (phy_mode == PHY_INTERFACE_MODE_10GKR) 6715 phy_mode = PHY_INTERFACE_MODE_10GBASER; 6716 6717 if (port_node) { 6718 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); 6719 if (IS_ERR(comphy)) { 6720 if (PTR_ERR(comphy) == -EPROBE_DEFER) { 6721 err = -EPROBE_DEFER; 6722 goto err_free_netdev; 6723 } 6724 comphy = NULL; 6725 } 6726 } 6727 6728 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { 6729 err = -EINVAL; 6730 dev_err(&pdev->dev, "missing port-id value\n"); 6731 goto err_free_netdev; 6732 } 6733 6734 dev->tx_queue_len = MVPP2_MAX_TXD_MAX; 6735 dev->watchdog_timeo = 5 * HZ; 6736 dev->netdev_ops = &mvpp2_netdev_ops; 6737 dev->ethtool_ops = &mvpp2_eth_tool_ops; 6738 6739 port = netdev_priv(dev); 6740 port->dev = dev; 6741 port->fwnode = port_fwnode; 6742 port->has_phy = !!of_find_property(port_node, "phy", NULL); 6743 port->ntxqs = ntxqs; 6744 port->nrxqs = nrxqs; 6745 port->priv = priv; 6746 port->has_tx_irqs = has_tx_irqs; 6747 port->flags = flags; 6748 6749 err = mvpp2_queue_vectors_init(port, port_node); 6750 if (err) 6751 goto err_free_netdev; 6752 6753 if (port_node) 6754 port->port_irq = of_irq_get_byname(port_node, "link"); 6755 else 6756 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); 6757 if (port->port_irq == -EPROBE_DEFER) { 6758 err = -EPROBE_DEFER; 6759 goto err_deinit_qvecs; 6760 } 6761 if (port->port_irq <= 0) 6762 /* the link irq is optional */ 6763 port->port_irq = 0; 6764 6765 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) 6766 port->flags |= MVPP2_F_LOOPBACK; 6767 6768 port->id = id; 6769 if (priv->hw_version == MVPP21) 6770 port->first_rxq = port->id * port->nrxqs; 6771 else 6772 port->first_rxq = port->id * priv->max_port_rxqs; 6773 6774 port->of_node = port_node; 6775 port->phy_interface = phy_mode; 6776 port->comphy = comphy; 6777 6778 if (priv->hw_version == MVPP21) { 6779 port->base = devm_platform_ioremap_resource(pdev, 2 + id); 6780 if (IS_ERR(port->base)) { 6781 err = PTR_ERR(port->base); 6782 goto err_free_irq; 6783 } 6784 6785 port->stats_base = port->priv->lms_base + 6786 MVPP21_MIB_COUNTERS_OFFSET + 6787 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; 6788 } else { 6789 if (fwnode_property_read_u32(port_fwnode, "gop-port-id", 6790 &port->gop_id)) { 6791 err = -EINVAL; 6792 dev_err(&pdev->dev, "missing gop-port-id value\n"); 6793 goto err_deinit_qvecs; 6794 } 6795 6796 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); 6797 port->stats_base = port->priv->iface_base + 6798 MVPP22_MIB_COUNTERS_OFFSET + 6799 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; 6800 6801 /* We may want a property to describe whether we should use 6802 * MAC hardware timestamping. 
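 * For now, hardware timestamping is simply enabled whenever a TAI clock
 * has been probed (priv->tai below).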
6803 */ 6804 if (priv->tai) 6805 port->hwtstamp = true; 6806 } 6807 6808 /* Alloc per-cpu and ethtool stats */ 6809 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); 6810 if (!port->stats) { 6811 err = -ENOMEM; 6812 goto err_free_irq; 6813 } 6814 6815 port->ethtool_stats = devm_kcalloc(&pdev->dev, 6816 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), 6817 sizeof(u64), GFP_KERNEL); 6818 if (!port->ethtool_stats) { 6819 err = -ENOMEM; 6820 goto err_free_stats; 6821 } 6822 6823 mutex_init(&port->gather_stats_lock); 6824 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); 6825 6826 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); 6827 6828 port->tx_ring_size = MVPP2_MAX_TXD_DFLT; 6829 port->rx_ring_size = MVPP2_MAX_RXD_DFLT; 6830 SET_NETDEV_DEV(dev, &pdev->dev); 6831 6832 err = mvpp2_port_init(port); 6833 if (err < 0) { 6834 dev_err(&pdev->dev, "failed to init port %d\n", id); 6835 goto err_free_stats; 6836 } 6837 6838 mvpp2_port_periodic_xon_disable(port); 6839 6840 mvpp2_mac_reset_assert(port); 6841 mvpp22_pcs_reset_assert(port); 6842 6843 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); 6844 if (!port->pcpu) { 6845 err = -ENOMEM; 6846 goto err_free_txq_pcpu; 6847 } 6848 6849 if (!port->has_tx_irqs) { 6850 for (thread = 0; thread < priv->nthreads; thread++) { 6851 port_pcpu = per_cpu_ptr(port->pcpu, thread); 6852 6853 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, 6854 HRTIMER_MODE_REL_PINNED_SOFT); 6855 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; 6856 port_pcpu->timer_scheduled = false; 6857 port_pcpu->dev = dev; 6858 } 6859 } 6860 6861 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 6862 NETIF_F_TSO; 6863 dev->features = features | NETIF_F_RXCSUM; 6864 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | 6865 NETIF_F_HW_VLAN_CTAG_FILTER; 6866 6867 if (mvpp22_rss_is_supported(port)) { 6868 dev->hw_features |= NETIF_F_RXHASH; 6869 dev->features |= NETIF_F_NTUPLE; 6870 } 6871 6872 if (!port->priv->percpu_pools) 6873 mvpp2_set_hw_csum(port, port->pool_long->id); 6874 6875 dev->vlan_features |= features; 6876 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; 6877 dev->priv_flags |= IFF_UNICAST_FLT; 6878 6879 /* MTU range: 68 - 9704 */ 6880 dev->min_mtu = ETH_MIN_MTU; 6881 /* 9704 == 9728 - 20 and rounding to 8 */ 6882 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; 6883 dev->dev.of_node = port_node; 6884 6885 /* Phylink isn't used w/ ACPI as of now */ 6886 if (port_node) { 6887 port->phylink_config.dev = &dev->dev; 6888 port->phylink_config.type = PHYLINK_NETDEV; 6889 6890 phylink = phylink_create(&port->phylink_config, port_fwnode, 6891 phy_mode, &mvpp2_phylink_ops); 6892 if (IS_ERR(phylink)) { 6893 err = PTR_ERR(phylink); 6894 goto err_free_port_pcpu; 6895 } 6896 port->phylink = phylink; 6897 } else { 6898 port->phylink = NULL; 6899 } 6900 6901 /* Cycle the comphy to power it down, saving 270mW per port - 6902 * don't worry about an error powering it up. When the comphy 6903 * driver does this, we can remove this code. 
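 * (mvpp22_comphy_init() powers the lane up; on success it is immediately
 * powered back off again until the port is actually opened.)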
6904 */
6905 if (port->comphy) {
6906 err = mvpp22_comphy_init(port);
6907 if (err == 0)
6908 phy_power_off(port->comphy);
6909 }
6910
6911 err = register_netdev(dev);
6912 if (err < 0) {
6913 dev_err(&pdev->dev, "failed to register netdev\n");
6914 goto err_phylink;
6915 }
6916 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6917
6918 priv->port_list[priv->port_count++] = port;
6919
6920 return 0;
6921
6922 err_phylink:
6923 if (port->phylink)
6924 phylink_destroy(port->phylink);
6925 err_free_port_pcpu:
6926 free_percpu(port->pcpu);
6927 err_free_txq_pcpu:
6928 for (i = 0; i < port->ntxqs; i++)
6929 free_percpu(port->txqs[i]->pcpu);
6930 err_free_stats:
6931 free_percpu(port->stats);
6932 err_free_irq:
6933 if (port->port_irq)
6934 irq_dispose_mapping(port->port_irq);
6935 err_deinit_qvecs:
6936 mvpp2_queue_vectors_deinit(port);
6937 err_free_netdev:
6938 free_netdev(dev);
6939 return err;
6940 }
6941
6942 /* Port removal routine */
6943 static void mvpp2_port_remove(struct mvpp2_port *port)
6944 {
6945 int i;
6946
6947 unregister_netdev(port->dev);
6948 if (port->phylink)
6949 phylink_destroy(port->phylink);
6950 free_percpu(port->pcpu);
6951 free_percpu(port->stats);
6952 for (i = 0; i < port->ntxqs; i++)
6953 free_percpu(port->txqs[i]->pcpu);
6954 mvpp2_queue_vectors_deinit(port);
6955 if (port->port_irq)
6956 irq_dispose_mapping(port->port_irq);
6957 free_netdev(port->dev);
6958 }
6959
6960 /* Initialize decoding windows */
6961 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6962 struct mvpp2 *priv)
6963 {
6964 u32 win_enable;
6965 int i;
6966
6967 for (i = 0; i < 6; i++) {
6968 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6969 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6970
6971 if (i < 4)
6972 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6973 }
6974
6975 win_enable = 0;
6976
6977 for (i = 0; i < dram->num_cs; i++) {
6978 const struct mbus_dram_window *cs = dram->cs + i;
6979
6980 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6981 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6982 dram->mbus_dram_target_id);
6983
6984 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6985 (cs->size - 1) & 0xffff0000);
6986
6987 win_enable |= (1 << i);
6988 }
6989
6990 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6991 }
6992
6993 /* Initialize Rx FIFOs */
6994 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6995 {
6996 int port;
6997
6998 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6999 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7000 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7001 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7002 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
7003 }
7004
7005 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7006 MVPP2_RX_FIFO_PORT_MIN_PKT);
7007 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7008 }
7009
7010 static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
7011 {
7012 int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
7013
7014 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
7015 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
7016 }
7017
7018 /* Initialize Rx FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
7019 * 4kB fixed space must be assigned for the loopback port.
7020 * Redistribute the remaining available 44kB space among all active ports.
7021 * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which is
7022 * capable of a 2.5G SGMII link.
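 * Example split with only ports 0 and 1 active: port 0 gets
 * max(44kB / 2, 32kB) = 32kB and port 1 the remaining 12kB.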
7023 */
7024 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
7025 {
7026 int remaining_ports_count;
7027 unsigned long port_map;
7028 int size_remainder;
7029 int port, size;
7030
7031 /* The loopback requires fixed 4kB of the FIFO space assignment. */
7032 mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7033 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7034 port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7035
7036 /* Set RX FIFO size to 0 for inactive ports. */
7037 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7038 mvpp22_rx_fifo_set_hw(priv, port, 0);
7039
7040 /* Assign remaining RX FIFO space among all active ports. */
7041 size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
7042 remaining_ports_count = hweight_long(port_map);
7043
7044 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
7045 if (remaining_ports_count == 1)
7046 size = size_remainder;
7047 else if (port == 0)
7048 size = max(size_remainder / remaining_ports_count,
7049 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
7050 else if (port == 1)
7051 size = max(size_remainder / remaining_ports_count,
7052 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
7053 else
7054 size = size_remainder / remaining_ports_count;
7055
7056 size_remainder -= size;
7057 remaining_ports_count--;
7058
7059 mvpp22_rx_fifo_set_hw(priv, port, size);
7060 }
7061
7062 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7063 MVPP2_RX_FIFO_PORT_MIN_PKT);
7064 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7065 }
7066
7067 /* Configure Rx FIFO Flow control thresholds */
7068 static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
7069 {
7070 int port, val;
7071
7072 /* Port 0: maximum speed 10Gb/s;
7073 * the spec requires an RX FIFO threshold of 9KB.
7074 * Port 1: maximum speed 5Gb/s;
7075 * the spec requires an RX FIFO threshold of 4KB.
7076 * Port 2: maximum speed 1Gb/s;
7077 * the spec requires an RX FIFO threshold of 2KB.
7078 */
7079
7080 /* Without loopback port */
7081 for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
7082 if (port == 0) {
7083 val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7084 << MVPP2_RX_FC_TRSH_OFFS;
7085 val &= MVPP2_RX_FC_TRSH_MASK;
7086 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7087 } else if (port == 1) {
7088 val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7089 << MVPP2_RX_FC_TRSH_OFFS;
7090 val &= MVPP2_RX_FC_TRSH_MASK;
7091 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7092 } else {
7093 val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
7094 << MVPP2_RX_FC_TRSH_OFFS;
7095 val &= MVPP2_RX_FC_TRSH_MASK;
7096 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7097 }
7098 }
7099 }
7100
7101 /* Enable/disable Rx FIFO Flow control for a port */
7102 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
7103 {
7104 int val;
7105
7106 val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
7107
7108 if (en)
7109 val |= MVPP2_RX_FC_EN;
7110 else
7111 val &= ~MVPP2_RX_FC_EN;
7112
7113 mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7114 }
7115
7116 static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
7117 {
7118 int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
7119
7120 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
7121 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
7122 }
7123
7124 /* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
7125 * 1kB fixed space must be assigned for the loopback port.
7126 * Redistribute the remaining available 18kB space among all active ports.
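 * (Inactive ports get a TX FIFO size of 0. For example, with only ports 0
 * and 2 active, port 0 takes its 10kB and port 2 the remaining 8kB.)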
7127 * The 10G interface should use 10kB (which is maximum possible size 7128 * per single port). 7129 */ 7130 static void mvpp22_tx_fifo_init(struct mvpp2 *priv) 7131 { 7132 int remaining_ports_count; 7133 unsigned long port_map; 7134 int size_remainder; 7135 int port, size; 7136 7137 /* The loopback requires fixed 1kB of the FIFO space assignment. */ 7138 mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, 7139 MVPP22_TX_FIFO_DATA_SIZE_1KB); 7140 port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); 7141 7142 /* Set TX FIFO size to 0 for inactive ports. */ 7143 for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) 7144 mvpp22_tx_fifo_set_hw(priv, port, 0); 7145 7146 /* Assign remaining TX FIFO space among all active ports. */ 7147 size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB; 7148 remaining_ports_count = hweight_long(port_map); 7149 7150 for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { 7151 if (remaining_ports_count == 1) 7152 size = min(size_remainder, 7153 MVPP22_TX_FIFO_DATA_SIZE_10KB); 7154 else if (port == 0) 7155 size = MVPP22_TX_FIFO_DATA_SIZE_10KB; 7156 else 7157 size = size_remainder / remaining_ports_count; 7158 7159 size_remainder -= size; 7160 remaining_ports_count--; 7161 7162 mvpp22_tx_fifo_set_hw(priv, port, size); 7163 } 7164 } 7165 7166 static void mvpp2_axi_init(struct mvpp2 *priv) 7167 { 7168 u32 val, rdval, wrval; 7169 7170 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 7171 7172 /* AXI Bridge Configuration */ 7173 7174 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 7175 << MVPP22_AXI_ATTR_CACHE_OFFS; 7176 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7177 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 7178 7179 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 7180 << MVPP22_AXI_ATTR_CACHE_OFFS; 7181 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7182 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 7183 7184 /* BM */ 7185 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); 7186 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 7187 7188 /* Descriptors */ 7189 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 7190 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 7191 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 7192 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 7193 7194 /* Buffer Data */ 7195 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 7196 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 7197 7198 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 7199 << MVPP22_AXI_CODE_CACHE_OFFS; 7200 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 7201 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7202 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 7203 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 7204 7205 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 7206 << MVPP22_AXI_CODE_CACHE_OFFS; 7207 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7208 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7209 7210 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 7211 7212 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 7213 << MVPP22_AXI_CODE_CACHE_OFFS; 7214 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 7215 << MVPP22_AXI_CODE_DOMAIN_OFFS; 7216 7217 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 7218 } 7219 7220 /* Initialize network controller common part HW */ 7221 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) 7222 { 7223 const struct mbus_dram_target_info *dram_target_info; 7224 int err, i; 7225 u32 val; 7226 7227 /* MBUS windows configuration */ 7228 dram_target_info = mv_mbus_dram_info(); 7229 if (dram_target_info) 7230 
mvpp2_conf_mbus_windows(dram_target_info, priv);
7231
7232 if (priv->hw_version >= MVPP22)
7233 mvpp2_axi_init(priv);
7234
7235 /* Disable HW PHY polling */
7236 if (priv->hw_version == MVPP21) {
7237 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7238 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7239 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7240 } else {
7241 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7242 val &= ~MVPP22_SMI_POLLING_EN;
7243 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7244 }
7245
7246 /* Allocate and initialize aggregated TXQs */
7247 priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
7248 sizeof(*priv->aggr_txqs),
7249 GFP_KERNEL);
7250 if (!priv->aggr_txqs)
7251 return -ENOMEM;
7252
7253 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7254 priv->aggr_txqs[i].id = i;
7255 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7256 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
7257 if (err < 0)
7258 return err;
7259 }
7260
7261 /* FIFO init */
7262 if (priv->hw_version == MVPP21) {
7263 mvpp2_rx_fifo_init(priv);
7264 } else {
7265 mvpp22_rx_fifo_init(priv);
7266 mvpp22_tx_fifo_init(priv);
7267 if (priv->hw_version == MVPP23)
7268 mvpp23_rx_fifo_fc_set_tresh(priv);
7269 }
7270
7271 if (priv->hw_version == MVPP21)
7272 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7273 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
7274
7275 /* Allow cache snoop when transmitting packets */
7276 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7277
7278 /* Buffer Manager initialization */
7279 err = mvpp2_bm_init(&pdev->dev, priv);
7280 if (err < 0)
7281 return err;
7282
7283 /* Parser default initialization */
7284 err = mvpp2_prs_default_init(pdev, priv);
7285 if (err < 0)
7286 return err;
7287
7288 /* Classifier default initialization */
7289 mvpp2_cls_init(priv);
7290
7291 return 0;
7292 }
7293
7294 static int mvpp2_get_sram(struct platform_device *pdev,
7295 struct mvpp2 *priv)
7296 {
7297 struct resource *res;
7298
7299 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
7300 if (!res) {
7301 if (has_acpi_companion(&pdev->dev))
7302 dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
7303 else
7304 dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
7305 return 0;
7306 }
7307
7308 priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
7309
7310 return PTR_ERR_OR_ZERO(priv->cm3_base);
7311 }
7312
7313 static int mvpp2_probe(struct platform_device *pdev)
7314 {
7315 const struct acpi_device_id *acpi_id;
7316 struct fwnode_handle *fwnode = pdev->dev.fwnode;
7317 struct fwnode_handle *port_fwnode;
7318 struct mvpp2 *priv;
7319 struct resource *res;
7320 void __iomem *base;
7321 int i, shared;
7322 int err;
7323
7324 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
7325 if (!priv)
7326 return -ENOMEM;
7327
7328 if (has_acpi_companion(&pdev->dev)) {
7329 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
7330 &pdev->dev);
7331 if (!acpi_id)
7332 return -EINVAL;
7333 priv->hw_version = (unsigned long)acpi_id->driver_data;
7334 } else {
7335 priv->hw_version =
7336 (unsigned long)of_device_get_match_data(&pdev->dev);
7337 }
7338
7339 /* Multi queue mode isn't supported on PPv2.1, fall back to single
7340 * mode
7341 */
7342 if (priv->hw_version == MVPP21)
7343 queue_mode = MVPP2_QDIST_SINGLE_MODE;
7344
7345 base = devm_platform_ioremap_resource(pdev, 0);
7346 if (IS_ERR(base))
7347 return PTR_ERR(base);
7348
7349 if (priv->hw_version == MVPP21) {
7350 priv->lms_base =
devm_platform_ioremap_resource(pdev, 1);
7351 if (IS_ERR(priv->lms_base))
7352 return PTR_ERR(priv->lms_base);
7353 } else {
7354 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7355 if (has_acpi_companion(&pdev->dev)) {
7356 /* If the MDIO memory region is declared in the
7357 * ACPI tables, it can already appear as 'in-use'
7358 * in the OS. Because it is overlapped by the second
7359 * region of the network controller, make
7360 * sure it is released before requesting it again.
7361 * The mvpp2 driver takes care to avoid
7362 * concurrent access to this memory region.
7363 */
7364 release_resource(res);
7365 }
7366 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7367 if (IS_ERR(priv->iface_base))
7368 return PTR_ERR(priv->iface_base);
7369
7370 /* Map CM3 SRAM */
7371 err = mvpp2_get_sram(pdev, priv);
7372 if (err)
7373 dev_warn(&pdev->dev, "Failed to alloc CM3 SRAM\n");
7374
7375 /* Enable global Flow Control only if the SRAM handle is not NULL */
7376 if (priv->cm3_base)
7377 priv->global_tx_fc = true;
7378 }
7379
7380 if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
7381 priv->sysctrl_base =
7382 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7383 "marvell,system-controller");
7384 if (IS_ERR(priv->sysctrl_base))
7385 /* The system controller regmap is optional for DT
7386 * compatibility reasons. When not provided, the
7387 * configuration of the GoP relies on the
7388 * firmware/bootloader.
7389 */
7390 priv->sysctrl_base = NULL;
7391 }
7392
7393 if (priv->hw_version >= MVPP22 &&
7394 mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
7395 priv->percpu_pools = 1;
7396
7397 mvpp2_setup_bm_pool();
7398
7399
7400 priv->nthreads = min_t(unsigned int, num_present_cpus(),
7401 MVPP2_MAX_THREADS);
7402
7403 shared = num_present_cpus() - priv->nthreads;
7404 if (shared > 0)
7405 bitmap_fill(&priv->lock_map,
7406 min_t(int, shared, MVPP2_MAX_THREADS));
7407
7408 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7409 u32 addr_space_sz;
7410
7411 addr_space_sz = (priv->hw_version == MVPP21 ?
7412 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
7413 priv->swth_base[i] = base + i * addr_space_sz;
7414 }
7415
7416 if (priv->hw_version == MVPP21)
7417 priv->max_port_rxqs = 8;
7418 else
7419 priv->max_port_rxqs = 32;
7420
7421 if (dev_of_node(&pdev->dev)) {
7422 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7423 if (IS_ERR(priv->pp_clk))
7424 return PTR_ERR(priv->pp_clk);
7425 err = clk_prepare_enable(priv->pp_clk);
7426 if (err < 0)
7427 return err;
7428
7429 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7430 if (IS_ERR(priv->gop_clk)) {
7431 err = PTR_ERR(priv->gop_clk);
7432 goto err_pp_clk;
7433 }
7434 err = clk_prepare_enable(priv->gop_clk);
7435 if (err < 0)
7436 goto err_pp_clk;
7437
7438 if (priv->hw_version >= MVPP22) {
7439 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7440 if (IS_ERR(priv->mg_clk)) {
7441 err = PTR_ERR(priv->mg_clk);
7442 goto err_gop_clk;
7443 }
7444
7445 err = clk_prepare_enable(priv->mg_clk);
7446 if (err < 0)
7447 goto err_gop_clk;
7448
7449 priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
7450 if (IS_ERR(priv->mg_core_clk)) {
7451 priv->mg_core_clk = NULL;
7452 } else {
7453 err = clk_prepare_enable(priv->mg_core_clk);
7454 if (err < 0)
7455 goto err_mg_clk;
7456 }
7457 }
7458
7459 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
7460 if (IS_ERR(priv->axi_clk)) {
7461 err = PTR_ERR(priv->axi_clk);
7462 if (err == -EPROBE_DEFER)
7463 goto err_mg_core_clk;
7464 priv->axi_clk = NULL;
7465 } else {
7466 err = clk_prepare_enable(priv->axi_clk);
7467 if (err < 0)
7468 goto err_mg_core_clk;
7469 }
7470
7471 /* Get system's tclk rate */
7472 priv->tclk = clk_get_rate(priv->pp_clk);
7473 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
7474 &priv->tclk)) {
7475 dev_err(&pdev->dev, "missing clock-frequency value\n");
7476 return -EINVAL;
7477 }
7478
7479 if (priv->hw_version >= MVPP22) {
7480 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
7481 if (err)
7482 goto err_axi_clk;
7483 /* Sadly, the BM pools all share the same register to
7484 * store the high 32 bits of their address. So they
7485 * must all have the same high 32 bits, which forces
7486 * us to restrict coherent memory to DMA_BIT_MASK(32).
7487 */
7488 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7489 if (err)
7490 goto err_axi_clk;
7491 }
7492
7493 /* Map DTS-active ports. This must be done before the FIFO init in mvpp2_init() */
7494 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7495 if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
7496 priv->port_map |= BIT(i);
7497 }
7498
7499 if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
7500 priv->hw_version = MVPP23;
7501
7502 /* Init the MSS spinlock */
7503 spin_lock_init(&priv->mss_spinlock);
7504
7505 /* Initialize network controller */
7506 err = mvpp2_init(pdev, priv);
7507 if (err < 0) {
7508 dev_err(&pdev->dev, "failed to initialize controller\n");
7509 goto err_axi_clk;
7510 }
7511
7512 err = mvpp22_tai_probe(&pdev->dev, priv);
7513 if (err < 0)
7514 goto err_axi_clk;
7515
7516 /* Initialize ports */
7517 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7518 err = mvpp2_port_probe(pdev, port_fwnode, priv);
7519 if (err < 0)
7520 goto err_port_probe;
7521 }
7522
7523 if (priv->port_count == 0) {
7524 dev_err(&pdev->dev, "no ports enabled\n");
7525 err = -ENODEV;
7526 goto err_axi_clk;
7527 }
7528
7529 /* Statistics must be gathered regularly because some of them (like
7530 * packet counters) are 32-bit registers and could overflow quite
7531 * quickly.
For instance, a 10Gb link used at full bandwidth with the 7532 * smallest packets (64B) will overflow a 32-bit counter in less than 7533 * 30 seconds. Then, use a workqueue to fill 64-bit counters. 7534 */ 7535 snprintf(priv->queue_name, sizeof(priv->queue_name), 7536 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), 7537 priv->port_count > 1 ? "+" : ""); 7538 priv->stats_queue = create_singlethread_workqueue(priv->queue_name); 7539 if (!priv->stats_queue) { 7540 err = -ENOMEM; 7541 goto err_port_probe; 7542 } 7543 7544 if (priv->global_tx_fc && priv->hw_version >= MVPP22) { 7545 err = mvpp2_enable_global_fc(priv); 7546 if (err) 7547 dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n"); 7548 } 7549 7550 mvpp2_dbgfs_init(priv, pdev->name); 7551 7552 platform_set_drvdata(pdev, priv); 7553 return 0; 7554 7555 err_port_probe: 7556 i = 0; 7557 fwnode_for_each_available_child_node(fwnode, port_fwnode) { 7558 if (priv->port_list[i]) 7559 mvpp2_port_remove(priv->port_list[i]); 7560 i++; 7561 } 7562 err_axi_clk: 7563 clk_disable_unprepare(priv->axi_clk); 7564 7565 err_mg_core_clk: 7566 if (priv->hw_version >= MVPP22) 7567 clk_disable_unprepare(priv->mg_core_clk); 7568 err_mg_clk: 7569 if (priv->hw_version >= MVPP22) 7570 clk_disable_unprepare(priv->mg_clk); 7571 err_gop_clk: 7572 clk_disable_unprepare(priv->gop_clk); 7573 err_pp_clk: 7574 clk_disable_unprepare(priv->pp_clk); 7575 return err; 7576 } 7577 7578 static int mvpp2_remove(struct platform_device *pdev) 7579 { 7580 struct mvpp2 *priv = platform_get_drvdata(pdev); 7581 struct fwnode_handle *fwnode = pdev->dev.fwnode; 7582 int i = 0, poolnum = MVPP2_BM_POOLS_NUM; 7583 struct fwnode_handle *port_fwnode; 7584 7585 mvpp2_dbgfs_cleanup(priv); 7586 7587 fwnode_for_each_available_child_node(fwnode, port_fwnode) { 7588 if (priv->port_list[i]) { 7589 mutex_destroy(&priv->port_list[i]->gather_stats_lock); 7590 mvpp2_port_remove(priv->port_list[i]); 7591 } 7592 i++; 7593 } 7594 7595 destroy_workqueue(priv->stats_queue); 7596 7597 if (priv->percpu_pools) 7598 poolnum = mvpp2_get_nrxqs(priv) * 2; 7599 7600 for (i = 0; i < poolnum; i++) { 7601 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; 7602 7603 mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); 7604 } 7605 7606 for (i = 0; i < MVPP2_MAX_THREADS; i++) { 7607 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; 7608 7609 dma_free_coherent(&pdev->dev, 7610 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 7611 aggr_txq->descs, 7612 aggr_txq->descs_dma); 7613 } 7614 7615 if (is_acpi_node(port_fwnode)) 7616 return 0; 7617 7618 clk_disable_unprepare(priv->axi_clk); 7619 clk_disable_unprepare(priv->mg_core_clk); 7620 clk_disable_unprepare(priv->mg_clk); 7621 clk_disable_unprepare(priv->pp_clk); 7622 clk_disable_unprepare(priv->gop_clk); 7623 7624 return 0; 7625 } 7626 7627 static const struct of_device_id mvpp2_match[] = { 7628 { 7629 .compatible = "marvell,armada-375-pp2", 7630 .data = (void *)MVPP21, 7631 }, 7632 { 7633 .compatible = "marvell,armada-7k-pp22", 7634 .data = (void *)MVPP22, 7635 }, 7636 { } 7637 }; 7638 MODULE_DEVICE_TABLE(of, mvpp2_match); 7639 7640 #ifdef CONFIG_ACPI 7641 static const struct acpi_device_id mvpp2_acpi_match[] = { 7642 { "MRVL0110", MVPP22 }, 7643 { }, 7644 }; 7645 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); 7646 #endif 7647 7648 static struct platform_driver mvpp2_driver = { 7649 .probe = mvpp2_probe, 7650 .remove = mvpp2_remove, 7651 .driver = { 7652 .name = MVPP2_DRIVER_NAME, 7653 .of_match_table = mvpp2_match, 
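/* ACPI_PTR() resolves to NULL when CONFIG_ACPI is disabled */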
7654 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), 7655 }, 7656 }; 7657 7658 module_platform_driver(mvpp2_driver); 7659 7660 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); 7661 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); 7662 MODULE_LICENSE("GPL v2"); 7663