// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq);

static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq)
{
	u64 incr = (u64)(cq->cq_idx) << 32;
	u64 status;

	status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

	if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
		     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
		dev_err(pfvf->dev, "CQ stopped due to error");
		return -EINVAL;
	}

	cq->cq_tail = status & 0xFFFFF;
	cq->cq_head = (status >> 20) & 0xFFFFF;
	if (cq->cq_tail < cq->cq_head)
		cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
				cq->cq_tail;
	else
		cq->pend_cqe = cq->cq_tail - cq->cq_head;

	return 0;
}

static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
				     struct otx2_snd_queue *sq,
				     struct nix_cqe_tx_s *cqe)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct sg_list *sg;
	struct page *page;
	u64 pa;

	sg = &sq->sg[snd_comp->sqe_id];

	pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
	otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
			    sg->size[0], DMA_TO_DEVICE);
	page = virt_to_page(phys_to_virt(pa));
	put_page(page);
}

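/* Tx completion handler: reports the hardware Tx timestamp when one was
 * requested, unmaps the skb's DMA buffers and frees the skb.
 */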
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct skb_shared_hwtstamps ts;
	struct sk_buff *skb = NULL;
	u64 timestamp, tsns;
	struct sg_list *sg;
	int err;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
		if (timestamp != 1) {
			timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
			if (!err) {
				memset(&ts, 0, sizeof(ts));
				ts.hwtstamp = ns_to_ktime(tsns);
				skb_tstamp_tx(skb, &ts);
			}
		}
	}

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
			      struct sk_buff *skb, void *data)
{
	u64 timestamp, tsns;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
		return;

	timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
	/* The first 8 bytes are the timestamp */
	err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
	if (err)
		return;

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse,
			      int qidx)
{
	struct page *page;
	int off = 0;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

	if (likely(!skb_shinfo(skb)->nr_frags)) {
		/* Check if data starts at some nonzero offset
		 * from the start of the buffer. For now the
		 * only possible offset is 8 bytes in the case
		 * where the packet is prepended by a timestamp.
		 */
		if (parse->laptr) {
			otx2_set_rxtstamp(pfvf, skb, va);
			off = OTX2_HW_TIMESTAMP_LEN;
		}
	}

	page = virt_to_page(va);
	if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				va - page_address(page) + off,
				len - off, pfvf->rbsize);

		otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
				    pfvf->rbsize, DMA_FROM_DEVICE);
		return true;
	}

	/* If more than MAX_SKB_FRAGS fragments are received then
	 * give back those buffer pointers to hardware for reuse.
	 */
	pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);

	return false;
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			pfvf->hw_ops->aura_freeptr(pfvf, qidx,
						   *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{
	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
	struct nix_rx_parse_s *parse = &cqe->parse;

	if (netif_msg_rx_err(pfvf))
		netdev_err(pfvf->netdev,
			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
			   qidx, parse->errlev, parse->errcode);

	if (parse->errlev == NPC_ERRLVL_RE) {
		switch (parse->errcode) {
		case ERRCODE_FCS:
		case ERRCODE_FCS_RCV:
			atomic_inc(&stats->rx_fcs_errs);
			break;
		case ERRCODE_UNDERSIZE:
			atomic_inc(&stats->rx_undersize_errs);
			break;
		case ERRCODE_OVERSIZE:
			atomic_inc(&stats->rx_oversize_errs);
			break;
		case ERRCODE_OL2_LEN_MISMATCH:
			atomic_inc(&stats->rx_len_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else if (parse->errlev == NPC_ERRLVL_NIX) {
		switch (parse->errcode) {
		case ERRCODE_OL3_LEN:
		case ERRCODE_OL4_LEN:
		case ERRCODE_IL3_LEN:
		case ERRCODE_IL4_LEN:
			atomic_inc(&stats->rx_len_errs);
			break;
		case ERRCODE_OL4_CSUM:
		case ERRCODE_IL4_CSUM:
			atomic_inc(&stats->rx_csum_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else {
		atomic_inc(&stats->rx_other_errs);
		/* For now ignore all the NPC parser errors and
		 * pass the packets to the stack.
		 */
		return false;
	}

	/* If RXALL is enabled pass on packets to the stack. */
	if (pfvf->netdev->features & NETIF_F_RXALL)
		return false;

	/* Free buffer back to pool */
	if (cqe->sg.segs)
		otx2_free_rcv_seg(pfvf, cqe, qidx);
	return true;
}

static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe)
{
	struct nix_rx_parse_s *parse = &cqe->parse;
	struct nix_rx_sg_s *sg = &cqe->sg;
	struct sk_buff *skb = NULL;
	void *end, *start;
	u64 *seg_addr;
	u16 *seg_size;
	int seg;

	if (unlikely(parse->errlev || parse->errcode)) {
		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
			return;
	}

	if (pfvf->xdp_prog)
		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
			return;

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		seg_size = (void *)sg;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
			if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
					      seg_size[seg], parse, cq->cq_idx))
				cq->pool_ptrs++;
		}
		start += sizeof(*sg);
	}
	otx2_set_rxhash(pfvf, cqe, skb);

	skb_record_rx_queue(skb, cq->cq_idx);
	if (pfvf->netdev->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
		    !cqe->sg.seg_addr) {
			if (!processed_cqe)
				return 0;
			break;
		}
		cq->cq_head++;
		cq->cq_head &= (cq->cqe_cnt - 1);

		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		cqe->sg.seg_addr = 0x00;
		processed_cqe++;
		cq->pend_cqe--;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	return processed_cqe;
}

void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
	struct otx2_nic *pfvf = dev;
	dma_addr_t bufptr;

	while (cq->pool_ptrs) {
		if (otx2_alloc_buffer(pfvf, cq, &bufptr))
			break;
		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
}

static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{
	int tx_pkts = 0, tx_bytes = 0, qidx;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		if (unlikely(!cqe)) {
			if (!processed_cqe)
				return 0;
			break;
		}
		if (cq->cq_type == CQ_XDP) {
			qidx = cq->cq_idx - pfvf->hw.rx_queues;
			otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
						 cqe);
		} else {
			otx2_snd_pkt_handler(pfvf, cq,
					     &pfvf->qset.sq[cq->cint_idx],
					     cqe, budget, &tx_pkts, &tx_bytes);
		}
		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		processed_cqe++;
		cq->pend_cqe--;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (likely(tx_pkts)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
		/* Check if queue was stopped earlier due to ring full */
		smp_mb();
		if (netif_tx_queue_stopped(txq) &&
		    netif_carrier_ok(pfvf->netdev))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
	struct dim_sample dim_sample;
	u64 rx_frames, rx_bytes;

	rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
		    OTX2_GET_RX_STATS(RX_UCAST);
	rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
	net_dim(&cq_poll->dim, dim_sample);
}

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
	struct otx2_cq_queue *rx_cq = NULL;
	struct otx2_cq_poll *cq_poll;
	int workdone = 0, cq_idx, i;
	struct otx2_cq_queue *cq;
	struct otx2_qset *qset;
	struct otx2_nic *pfvf;

	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	qset = &pfvf->qset;

	for (i = 0; i < CQS_PER_CINT; i++) {
		cq_idx = cq_poll->cq_ids[i];
		if (unlikely(cq_idx == CINT_INVALID_CQ))
			continue;
		cq = &qset->cq[cq_idx];
		if (cq->cq_type == CQ_RX) {
			rx_cq = cq;
			workdone += otx2_rx_napi_handler(pfvf, napi,
							 cq, budget);
		} else {
			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
		}
	}

	if (rx_cq && rx_cq->pool_ptrs)
		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
	/* Clear the IRQ */
	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* If interface is going down, don't re-enable IRQ */
		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
			return workdone;

		/* Check for adaptive interrupt coalescing */
		if (workdone != 0 &&
		    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
		     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
			/* Adjust irq coalescing using net_dim */
			otx2_adjust_adaptive_coalese(pfvf, cq_poll);
			/* Update irq coalescing */
			for (i = 0; i < pfvf->hw.cint_cnt; i++)
				otx2_config_irq_coalescing(pfvf, i);
		}

		/* Re-enable interrupts */
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
			     BIT_ULL(0));
	}
	return workdone;
}

void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx)
{
	u64 status;

	/* Packet data stores should finish before SQE is flushed to HW */
	dma_wmb();

	do {
		memcpy(sq->lmt_addr, sq->sqe_base, size);
		status = otx2_lmt_flush(sq->io_addr);
	} while (status == 0);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

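	/* Start with a clean scatter list for this SQE; segments are
	 * accumulated below as each skb fragment is DMA mapped.
	 */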
	sq->sg[sq->head].num_segs = 0;

	for (seg = 0; seg < num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16byte boundary.
			 * So whether sg->segs is 2 or 3, offset += 16bytes.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
		sg->segs++;
		*iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		ext->lso_sb = skb_tcp_all_headers(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4 and TSOv6 GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds payload size to 'ip_hdr->tot_len' while
			 * sending TSO segment, hence set payload length
			 * in IP header of the packet to just header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;

			ipv6_hdr(skb)->payload_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			__be16 l3_proto = vlan_get_protocol(skb);
			struct udphdr *udph = udp_hdr(skb);
			u16 iplen;

			ext->lso_sb = skb_transport_offset(skb) +
				      sizeof(struct udphdr);

			/* HW adds payload size to length fields in IP and
			 * UDP headers during segmentation, hence adjust the
			 * lengths to just header sizes.
			 */
			iplen = htons(ext->lso_sb - skb_network_offset(skb));
			if (l3_proto == htons(ETH_P_IP)) {
				ip_hdr(skb)->tot_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv4_idx;
			} else {
				ipv6_hdr(skb)->payload_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv6_idx;
			}

			udph->len = htons(sizeof(struct udphdr));
		}
	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ext->tstmp = 1;
	}

#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
	if (skb_vlan_tag_present(skb)) {
		if (skb->vlan_proto == htons(ETH_P_8021Q)) {
			ext->vlan1_ins_ena = 1;
			ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
		} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
			ext->vlan0_ins_ena = 1;
			ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
		}
	}

	*offset += sizeof(*ext);
}

static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
			     int alg, u64 iova)
{
	struct nix_sqe_mem_s *mem;

	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
	mem->subdc = NIX_SUBDC_MEM;
	mem->alg = alg;
	mem->wmem = 1; /* wait for the memory operation */
	mem->addr = iova;

	*offset += sizeof(*mem);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{
	int proto = 0;

	/* Check if SQE was framed before, if yes then no need to
	 * set these constants again and again.
	 */
	if (!sqe_hdr->total) {
		/* Don't free Tx buffers to Aura */
		sqe_hdr->df = 1;
		sqe_hdr->aura = sq->aura_id;
		/* Post a CQE Tx after pkt transmission */
		sqe_hdr->pnc = 1;
		sqe_hdr->sq = qidx;
	}
	sqe_hdr->total = skb->len;
	/* Set SQE identifier which will be used later for freeing SKB */
	sqe_hdr->sqe_id = sq->head;

	/* Offload TCP/UDP checksum to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		sqe_hdr->ol3ptr = skb_network_offset(skb);
		sqe_hdr->ol4ptr = skb_transport_offset(skb);
		/* get vlan protocol Ethertype */
		if (eth_type_vlan(skb->protocol))
			skb->protocol = vlan_get_protocol(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			proto = ip_hdr(skb)->protocol;
			/* In case of TSO, HW needs this to be explicitly set.
			 * So set this always, instead of adding a check.
			 */
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			proto = ipv6_hdr(skb)->nexthdr;
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
		}

		if (proto == IPPROTO_TCP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
		else if (proto == IPPROTO_UDP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
	}
}

static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data, if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}

static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16byte boundary.
			 * So whether sg->segs is 2 or 3, offset += 16bytes.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}

static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len, tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);

	/* Map SKB's fragments to DMA.
	 * It's done here to avoid mapping for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	netdev_tx_sent_queue(txq, skb->len);

	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;
			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and skb need to be freed only after the last
		 * TSO segment is transmitted out. So set 'PNC' only for
		 * the last segment. Also point the last segment's sqe_id to
		 * the first segment's SQE index where the skb address and
		 * DMA mappings are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
	}
}

static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
		return true;

	/* On 96xx A0, HW TSO not supported */
	if (!is_96xx_B0(pfvf->pdev))
		return false;

	/* HW has an issue due to which when the payload of the last LSO
	 * segment is shorter than 16 bytes, some header fields may not
	 * be correctly modified, hence don't offload such TSO segments.
	 */

	payload_len = skb->len - skb_tcp_all_headers(skb);
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
	if (!skb_shinfo(skb)->gso_size)
		return 1;

	/* HW TSO */
	if (is_hw_tso_supported(pfvf, skb))
		return 1;

	/* SW TSO */
	return skb_shinfo(skb)->gso_segs;
}

static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
			      struct otx2_snd_queue *sq, int *offset)
{
	u64 iova;

	if (!skb_shinfo(skb)->gso_size &&
	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
	} else {
		skb_tx_timestamp(skb);
	}
}

bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int offset, num_segs, free_sqe;
	struct nix_sqe_hdr_s *sqe_hdr;

	/* Check if there is room for new SQE.
	 * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
	 * will give free SQE count.
	 */
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

	if (free_sqe < sq->sqe_thresh ||
	    free_sqe < otx2_get_sqe_count(pfvf, skb))
		return false;

	num_segs = skb_shinfo(skb)->nr_frags + 1;

	/* If SKB doesn't fit in a single SQE, linearize it.
	 * TODO: Consider adding JUMP descriptor instead.
	 */
	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return true;
		}
		num_segs = skb_shinfo(skb)->nr_frags + 1;
	}

	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
		/* Insert vlan tag before giving pkt to tso */
		if (skb_vlan_tag_present(skb))
			skb = __vlan_hwaccel_push_inside(skb);
		otx2_sq_append_tso(pfvf, sq, skb, qidx);
		return true;
	}

	/* Set SQE's SEND_HDR.
	 * Do not clear the first 64bit as it contains constant info.
	 */
	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
	offset = sizeof(*sqe_hdr);

	/* Add extended header if needed */
	otx2_sqe_add_ext(pfvf, sq, skb, &offset);

	/* Add SG subdesc with data frags */
	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
		return false;
	}

	otx2_set_txtstamp(pfvf, skb, sq, &offset);

	sqe_hdr->sizem1 = (offset / 16) - 1;

	netdev_tx_sent_queue(txq, skb->len);

	/* Flush SQE to HW */
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);

void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;
	u64 iova, pa;

	if (pfvf->xdp_prog)
		xdp_rxq_info_unreg(&cq->xdp_rxq);

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		if (cqe->sg.segs > 1) {
			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
			continue;
		}
		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
		put_page(virt_to_page(phys_to_virt(pa)));
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct sk_buff *skb = NULL;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;
	struct sg_list *sg;

	sq = &pfvf->qset.sq[cq->cint_idx];

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		sg = &sq->sg[cqe->comp.sqe_id];
		skb = (struct sk_buff *)sg->skb;
		if (skb) {
			otx2_dma_unmap_skb_frags(pfvf, sg);
			dev_kfree_skb_any(skb);
			sg->skb = (u64)NULL;
		}
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	else
		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

	if (!msg) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
				int len, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 *iova = NULL;

	sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
	sg->ld_type = NIX_SEND_LDTYPE_LDD;
	sg->subdc = NIX_SUBDC_SG;
	sg->segs = 1;
	sg->seg1_size = len;
	iova = (void *)sg + sizeof(*sg);
	*iova = dma_addr;
	*offset += sizeof(*sg) + sizeof(u64);

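	/* Save DMA mapping info for later unmapping at Tx completion */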
	sq->sg[sq->head].dma_addr[0] = dma_addr;
	sq->sg[sq->head].size[0] = len;
	sq->sg[sq->head].num_segs = 1;
}

bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
{
	struct nix_sqe_hdr_s *sqe_hdr;
	struct otx2_snd_queue *sq;
	int offset, free_sqe;

	sq = &pfvf->qset.sq[qidx];
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
	if (free_sqe < sq->sqe_thresh)
		return false;

	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);

	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);

	if (!sqe_hdr->total) {
		sqe_hdr->aura = sq->aura_id;
		sqe_hdr->df = 1;
		sqe_hdr->sq = qidx;
		sqe_hdr->pnc = 1;
	}
	sqe_hdr->total = len;
	sqe_hdr->sqe_id = sq->head;

	offset = sizeof(*sqe_hdr);

	otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
	sqe_hdr->sizem1 = (offset / 16) - 1;
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq)
{
	unsigned char *hard_start, *data;
	int qidx = cq->cq_idx;
	struct xdp_buff xdp;
	struct page *page;
	u64 iova, pa;
	u32 act;
	int err;

	iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
	page = virt_to_page(phys_to_virt(pa));

	xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);

	data = (unsigned char *)phys_to_virt(pa);
	hard_start = page_address(page);
	xdp_prepare_buff(&xdp, hard_start, data - hard_start,
			 cqe->sg.seg_size, false);

	act = bpf_prog_run_xdp(prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		qidx += pfvf->hw.tx_queues;
		cq->pool_ptrs++;
		return otx2_xdp_sq_append_pkt(pfvf, iova,
					      cqe->sg.seg_size, qidx);
	case XDP_REDIRECT:
		cq->pool_ptrs++;
		err = xdp_do_redirect(pfvf->netdev, &xdp, prog);

		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		if (!err)
			return true;
		put_page(page);
		break;
	default:
		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
		break;
	case XDP_ABORTED:
		trace_xdp_exception(pfvf->netdev, prog, act);
		break;
	case XDP_DROP:
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		put_page(page);
		cq->pool_ptrs++;
		return true;
	}
	return false;
}