// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>

#include "lan966x_main.h"

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
					       struct lan966x_db *db)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return NULL;

	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return page;
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	int i, j;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct page *page;

	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
				    struct lan966x_rx_dcb *dcb,
				    u64 nextptr)
{
	struct lan966x_db *db;
	int i;

	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
		db = &dcb->db[i];
		db->status = FDMA_DCB_STATUS_INTR;
	}

	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = FDMA_DCB_MAX,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}

static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	struct page *page;
	int i, j;
	int size;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);

	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
	if (!rx->dcbs)
		return -ENOMEM;

	rx->last_entry = rx->dcbs;
	rx->db_index = 0;
	rx->dcb_index = 0;

	/* Now for each dcb allocate the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];
		dcb->info = 0;

		/* For each db allocate a page and map it to the DB dataptr. */
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			page = lan966x_fdma_rx_alloc_page(rx, db);
			if (!page)
				return -ENOMEM;

			db->status = 0;
			rx->page[i][j] = page;
		}

		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
	}

	return 0;
}

static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	rx->dcb_index++;
	rx->dcb_index &= FDMA_DCB_MAX - 1;
}

static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 size;

	/* Now it is possible to clean up the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 mask;

	/* When activating a channel, the first DCB address must be written
	 * before the channel can be activated
	 */
	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP(rx->channel_id));
	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP1(rx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(rx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(rx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(rx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}

static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb allocate the db */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 mask;

	/* When activating a channel, the first DCB address must be written
	 * before the channel can be activated
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(tx->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(tx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(tx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(tx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(tx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct xdp_frame_bulk bq;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			if (dcb_buf->xdp_ndo)
				dma_unmap_single(lan966x->dev,
						 dcb_buf->dma_addr,
						 dcb_buf->len,
						 DMA_TO_DEVICE);

			if (dcb_buf->xdp_ndo)
				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
			else
				page_pool_recycle_direct(rx->page_pool,
							 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct lan966x_db *db;

	/* Check if there is any data */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_port *port;
	struct lan966x_db *db;
	struct page *page;

	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	u64 timestamp;

	/* Get the received frame and unmap it */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];

	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int dcb_reload = rx->dcb_index;
	struct lan966x_rx_dcb *old_dcb;
	struct lan966x_db *db;
	bool redirect = false;
	struct sk_buff *skb;
	struct page *page;
	int counter = 0;
	u64 src_port;
	u64 nextptr;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all received skb */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != rx->dcb_index) {
		db = &rx->dcbs[dcb_reload].db[rx->db_index];
		page = lan966x_fdma_rx_alloc_page(rx, db);
		if (unlikely(!page))
			break;
		rx->page[dcb_reload][rx->db_index] = page;

		old_dcb = &rx->dcbs[dcb_reload];
		dcb_reload++;
		dcb_reload &= FDMA_DCB_MAX - 1;

		nextptr = rx->dma + ((unsigned long)old_dcb -
				     (unsigned long)rx->dcbs);
		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	int i;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
				      int next_to_use, int len,
				      dma_addr_t dma_addr)
{
	struct lan966x_tx_dcb *next_dcb;
	struct lan966x_db *next_db;

	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(len);
}

static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Connect the current dcb to the next dcb */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* Because this is the first time, just activate the channel */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Move to the next dcb because this one is now the last in use */
	tx->last_in_use = next_to_use;
}

int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  xdpf->len + IFH_LEN_BYTES,
					  dma_addr);
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  len + IFH_LEN_BYTES,
					  dma_addr + XDP_PACKET_HEADROOM);
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, 4);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Setup next dcb */
	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Store these for later to free them */
	rx_dma = lan966x->rx.dma;
	rx_dcbs = lan966x->rx.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.dma = rx_dma;
	lan966x->rx.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Enable back the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		lan966x_fdma_rx_free(&lan966x->rx);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x_fdma_rx_free(&lan966x->rx);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}