// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq);

static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq)
{
	u64 incr = (u64)(cq->cq_idx) << 32;
	u64 status;

	status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

	if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
		     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
		dev_err(pfvf->dev, "CQ stopped due to error");
		return -EINVAL;
	}

	cq->cq_tail = status & 0xFFFFF;
	cq->cq_head = (status >> 20) & 0xFFFFF;
	if (cq->cq_tail < cq->cq_head)
		cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
				cq->cq_tail;
	else
		cq->pend_cqe = cq->cq_tail - cq->cq_head;

	return 0;
}

static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

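/* The three 16-bit segment sizes of an SG subdescriptor share one
 * 64-bit word.  On big-endian hosts the halfwords within that word are
 * laid out in the opposite order, so mirror the index inside its group
 * of four: 0 <-> 3, 1 <-> 2, 4 <-> 7, 5 <-> 6, and so on.  On
 * little-endian hosts the index is used as-is.
 */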
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
				     struct otx2_snd_queue *sq,
				     struct nix_cqe_tx_s *cqe)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct sg_list *sg;
	struct page *page;
	u64 pa;

	sg = &sq->sg[snd_comp->sqe_id];

	pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
	otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
			    sg->size[0], DMA_TO_DEVICE);
	page = virt_to_page(phys_to_virt(pa));
	put_page(page);
}

static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct skb_shared_hwtstamps ts;
	struct sk_buff *skb = NULL;
	u64 timestamp, tsns;
	struct sg_list *sg;
	int err;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
		if (timestamp != 1) {
			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
			if (!err) {
				memset(&ts, 0, sizeof(ts));
				ts.hwtstamp = ns_to_ktime(tsns);
				skb_tstamp_tx(skb, &ts);
			}
		}
	}

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
			      struct sk_buff *skb, void *data)
{
	u64 tsns;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
		return;

	/* The first 8 bytes are the timestamp */
	err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
	if (err)
		return;

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse)
{
	struct page *page;
	int off = 0;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

	if (likely(!skb_shinfo(skb)->nr_frags)) {
		/* Check if data starts at some nonzero offset
		 * from the start of the buffer.  For now the
		 * only possible offset is 8 bytes in the case
		 * where packet is prepended by a timestamp.
		 */
		if (parse->laptr) {
			otx2_set_rxtstamp(pfvf, skb, va);
			off = OTX2_HW_TIMESTAMP_LEN;
		}
	}

	page = virt_to_page(va);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			va - page_address(page) + off, len - off, pfvf->rbsize);

	otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
			    pfvf->rbsize, DMA_FROM_DEVICE);
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

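/* An Rx CQE may carry more than one NIX SG subdescriptor when the
 * packet is scattered across several buffers.  Walk the SG area
 * (sized from parse.desc_sizem1) and hand every segment pointer,
 * with its low alignment bits masked off, back to the aura so the
 * buffers can be reused.
 */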
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			pfvf->hw_ops->aura_freeptr(pfvf, qidx,
						   *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{
	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
	struct nix_rx_parse_s *parse = &cqe->parse;

	if (netif_msg_rx_err(pfvf))
		netdev_err(pfvf->netdev,
			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
			   qidx, parse->errlev, parse->errcode);

	if (parse->errlev == NPC_ERRLVL_RE) {
		switch (parse->errcode) {
		case ERRCODE_FCS:
		case ERRCODE_FCS_RCV:
			atomic_inc(&stats->rx_fcs_errs);
			break;
		case ERRCODE_UNDERSIZE:
			atomic_inc(&stats->rx_undersize_errs);
			break;
		case ERRCODE_OVERSIZE:
			atomic_inc(&stats->rx_oversize_errs);
			break;
		case ERRCODE_OL2_LEN_MISMATCH:
			atomic_inc(&stats->rx_len_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else if (parse->errlev == NPC_ERRLVL_NIX) {
		switch (parse->errcode) {
		case ERRCODE_OL3_LEN:
		case ERRCODE_OL4_LEN:
		case ERRCODE_IL3_LEN:
		case ERRCODE_IL4_LEN:
			atomic_inc(&stats->rx_len_errs);
			break;
		case ERRCODE_OL4_CSUM:
		case ERRCODE_IL4_CSUM:
			atomic_inc(&stats->rx_csum_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else {
		atomic_inc(&stats->rx_other_errs);
		/* For now ignore all the NPC parser errors and
		 * pass the packets to stack.
		 */
		return false;
	}

	/* If RXALL is enabled pass on packets to stack. */
	if (pfvf->netdev->features & NETIF_F_RXALL)
		return false;

	/* Free buffer back to pool */
	if (cqe->sg.segs)
		otx2_free_rcv_seg(pfvf, cqe, qidx);
	return true;
}

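/* Per-packet Rx fast path: drop or count hard errors, give XDP a
 * chance to consume the frame, then attach every hardware segment to
 * a frag-list skb from napi_get_frags() and feed it to GRO.  Each
 * consumed segment is accounted in cq->pool_ptrs so the buffer pool
 * can be refilled after the poll loop.
 */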
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe)
{
	struct nix_rx_parse_s *parse = &cqe->parse;
	struct nix_rx_sg_s *sg = &cqe->sg;
	struct sk_buff *skb = NULL;
	void *end, *start;
	u64 *seg_addr;
	u16 *seg_size;
	int seg;

	if (unlikely(parse->errlev || parse->errcode)) {
		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
			return;
	}

	if (pfvf->xdp_prog)
		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
			return;

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		seg_size = (void *)sg;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
			otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
					  parse);
			cq->pool_ptrs++;
		}
		start += sizeof(*sg);
	}
	otx2_set_rxhash(pfvf, cqe, skb);

	skb_record_rx_queue(skb, cq->cq_idx);
	if (pfvf->netdev->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
		    !cqe->sg.seg_addr) {
			if (!processed_cqe)
				return 0;
			break;
		}
		cq->cq_head++;
		cq->cq_head &= (cq->cqe_cnt - 1);

		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		cqe->sg.seg_addr = 0x00;
		processed_cqe++;
		cq->pend_cqe--;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	return processed_cqe;
}

void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
	struct otx2_nic *pfvf = dev;
	dma_addr_t bufptr;

	while (cq->pool_ptrs) {
		if (otx2_alloc_buffer(pfvf, cq, &bufptr))
			break;
		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
}

static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{
	int tx_pkts = 0, tx_bytes = 0, qidx;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		if (unlikely(!cqe)) {
			if (!processed_cqe)
				return 0;
			break;
		}
		if (cq->cq_type == CQ_XDP) {
			qidx = cq->cq_idx - pfvf->hw.rx_queues;
			otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
						 cqe);
		} else {
			otx2_snd_pkt_handler(pfvf, cq,
					     &pfvf->qset.sq[cq->cint_idx],
					     cqe, budget, &tx_pkts, &tx_bytes);
		}
		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		processed_cqe++;
		cq->pend_cqe--;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (likely(tx_pkts)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
		/* Check if queue was stopped earlier due to ring full */
		smp_mb();
		if (netif_tx_queue_stopped(txq) &&
		    netif_carrier_ok(pfvf->netdev))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
	struct otx2_cq_queue *rx_cq = NULL;
	struct otx2_cq_poll *cq_poll;
	int workdone = 0, cq_idx, i;
	struct otx2_cq_queue *cq;
	struct otx2_qset *qset;
	struct otx2_nic *pfvf;

	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	qset = &pfvf->qset;

	for (i = 0; i < CQS_PER_CINT; i++) {
		cq_idx = cq_poll->cq_ids[i];
		if (unlikely(cq_idx == CINT_INVALID_CQ))
			continue;
		cq = &qset->cq[cq_idx];
		if (cq->cq_type == CQ_RX) {
			rx_cq = cq;
			workdone += otx2_rx_napi_handler(pfvf, napi,
							 cq, budget);
		} else {
			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
		}
	}

	if (rx_cq && rx_cq->pool_ptrs)
		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
	/* Clear the IRQ */
	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* If interface is going down, don't re-enable IRQ */
		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
			return workdone;

		/* Re-enable interrupts */
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
			     BIT_ULL(0));
	}
	return workdone;
}

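/* Push one SQE to hardware through the LMT store (LMTST) region.
 * otx2_lmt_flush() returns 0 when the LMTST did not complete (the
 * store can be discarded by the core, for example across an exception
 * or interrupt), so the copy plus flush is retried until it succeeds.
 */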
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx)
{
	u64 status;

	/* Packet data stores should finish before SQE is flushed to HW */
	dma_wmb();

	do {
		memcpy(sq->lmt_addr, sq->sqe_base, size);
		status = otx2_lmt_flush(sq->io_addr);
	} while (status == 0);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

	sq->sg[sq->head].num_segs = 0;

	for (seg = 0; seg < num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdescriptor must start at a 16-byte
			 * boundary, so reserve all three address words when
			 * this SG will carry 2 or 3 segments (the unused
			 * slot is padding) and just one word otherwise.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
		sg->segs++;
		*iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4 and TSOv6 GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds payload size to 'ip_hdr->tot_len' while
			 * sending TSO segment, hence set payload length
			 * in IP header of the packet to just header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;

			ipv6_hdr(skb)->payload_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			__be16 l3_proto = vlan_get_protocol(skb);
			struct udphdr *udph = udp_hdr(skb);
			u16 iplen;

			ext->lso_sb = skb_transport_offset(skb) +
				      sizeof(struct udphdr);

			/* HW adds payload size to length fields in IP and
			 * UDP headers during segmentation, hence adjust the
			 * lengths to just header sizes.
			 */
			iplen = htons(ext->lso_sb - skb_network_offset(skb));
			if (l3_proto == htons(ETH_P_IP)) {
				ip_hdr(skb)->tot_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv4_idx;
			} else {
				ipv6_hdr(skb)->payload_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv6_idx;
			}

			udph->len = htons(sizeof(struct udphdr));
		}
	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ext->tstmp = 1;
	}

#define OTX2_VLAN_PTR_OFFSET	(ETH_HLEN - ETH_TLEN)
	if (skb_vlan_tag_present(skb)) {
		if (skb->vlan_proto == htons(ETH_P_8021Q)) {
			ext->vlan1_ins_ena = 1;
			ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
		} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
			ext->vlan0_ins_ena = 1;
			ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
		}
	}

	*offset += sizeof(*ext);
}

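/* Add SQE memory subdescriptor.  It asks the NIX to perform the given
 * ALG operation on 'iova' once the packet has gone out; the TX
 * timestamping path uses NIX_SENDMEMALG_E_SETTSTMP so the transmit
 * timestamp lands in the per-SQE slot of sq->timestamps, where
 * otx2_snd_pkt_handler() picks it up on completion.
 */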
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
			     int alg, u64 iova)
{
	struct nix_sqe_mem_s *mem;

	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
	mem->subdc = NIX_SUBDC_MEM;
	mem->alg = alg;
	mem->wmem = 1; /* wait for the memory operation */
	mem->addr = iova;

	*offset += sizeof(*mem);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{
	int proto = 0;

	/* Check if SQE was framed before, if yes then no need to
	 * set these constants again and again.
	 */
	if (!sqe_hdr->total) {
		/* Don't free Tx buffers to Aura */
		sqe_hdr->df = 1;
		sqe_hdr->aura = sq->aura_id;
		/* Post a CQE Tx after pkt transmission */
		sqe_hdr->pnc = 1;
		sqe_hdr->sq = qidx;
	}
	sqe_hdr->total = skb->len;
	/* Set SQE identifier which will be used later for freeing SKB */
	sqe_hdr->sqe_id = sq->head;

	/* Offload TCP/UDP checksum to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		sqe_hdr->ol3ptr = skb_network_offset(skb);
		sqe_hdr->ol4ptr = skb_transport_offset(skb);
		/* get vlan protocol Ethertype */
		if (eth_type_vlan(skb->protocol))
			skb->protocol = vlan_get_protocol(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			proto = ip_hdr(skb)->protocol;
			/* In case of TSO, HW needs this to be explicitly set.
			 * So set this always, instead of adding a check.
			 */
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			proto = ipv6_hdr(skb)->nexthdr;
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
		}

		if (proto == IPPROTO_TCP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
		else if (proto == IPPROTO_UDP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
	}
}

static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data, if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}

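/* Translate a software-TSO data pointer back to its DMA address using
 * the mappings saved by otx2_dma_map_tso_skb().  A negative 'seg'
 * refers to the skb linear area (sg->dma_addr[0]); for page frags the
 * saved index is shifted by one when the linear area carried payload
 * of its own and therefore occupies slot 0.
 */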
static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdescriptor must start at a 16-byte
			 * boundary, so reserve all three address words when
			 * this SG will carry 2 or 3 segments (the unused
			 * slot is padding) and just one word otherwise.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}

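/* Software TSO fallback: the skb's fragments are DMA-mapped once
 * against the first SQE, then one SQE is built per TSO segment with
 * its replicated header taken from the per-queue tso_hdrs buffer.
 * Only the last segment requests a completion (PNC) and points its
 * sqe_id back at the first SQE, so the skb and its mappings are
 * released exactly once when that final segment is done.
 */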
static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len, tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);

	/* Map SKB's fragments to DMA.
	 * It's done here to avoid mapping for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	netdev_tx_sent_queue(txq, skb->len);

	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;
			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and skb need to be freed only after the last
		 * TSO segment is transmitted out. So set 'PNC' only for
		 * the last segment. Also point the last segment's sqe_id to
		 * the first segment's SQE index where the skb address and
		 * DMA mappings are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
	}
}

static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
		return true;

	/* On 96xx A0, HW TSO not supported */
	if (!is_96xx_B0(pfvf->pdev))
		return false;

	/* HW has an issue due to which when the payload of the last LSO
	 * segment is shorter than 16 bytes, some header fields may not
	 * be correctly modified, hence don't offload such TSO segments.
	 */

	payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
	if (!skb_shinfo(skb)->gso_size)
		return 1;

	/* HW TSO */
	if (is_hw_tso_supported(pfvf, skb))
		return 1;

	/* SW TSO */
	return skb_shinfo(skb)->gso_segs;
}

static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
			      struct otx2_snd_queue *sq, int *offset)
{
	u64 iova;

	if (!skb_shinfo(skb)->gso_size &&
	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
	} else {
		skb_tx_timestamp(skb);
	}
}

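/* Main TX entry point: builds a single SQE for the skb in the order
 * SEND_HDR, optional EXT (LSO/VLAN/timestamp), SG fragments and an
 * optional MEM subdescriptor for PTP, then flushes it to hardware.
 * Returns false when the SQ has no room or DMA mapping fails, without
 * consuming the skb, so the caller can stop the queue and retry later.
 */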
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int offset, num_segs, free_sqe;
	struct nix_sqe_hdr_s *sqe_hdr;

	/* Check if there is room for new SQE.
	 * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
	 * will give free SQE count.
	 */
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

	if (free_sqe < sq->sqe_thresh ||
	    free_sqe < otx2_get_sqe_count(pfvf, skb))
		return false;

	num_segs = skb_shinfo(skb)->nr_frags + 1;

	/* If SKB doesn't fit in a single SQE, linearize it.
	 * TODO: Consider adding JUMP descriptor instead.
	 */
	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return true;
		}
		num_segs = skb_shinfo(skb)->nr_frags + 1;
	}

	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
		/* Insert vlan tag before giving pkt to tso */
		if (skb_vlan_tag_present(skb))
			skb = __vlan_hwaccel_push_inside(skb);
		otx2_sq_append_tso(pfvf, sq, skb, qidx);
		return true;
	}

	/* Set SQE's SEND_HDR.
	 * Do not clear the first 64bit as it contains constant info.
	 */
	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
	offset = sizeof(*sqe_hdr);

	/* Add extended header if needed */
	otx2_sqe_add_ext(pfvf, sq, skb, &offset);

	/* Add SG subdesc with data frags */
	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
		return false;
	}

	otx2_set_txtstamp(pfvf, skb, sq, &offset);

	sqe_hdr->sizem1 = (offset / 16) - 1;

	netdev_tx_sent_queue(txq, skb->len);

	/* Flush SQE to HW */
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);

void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;
	u64 iova, pa;

	if (pfvf->xdp_prog)
		xdp_rxq_info_unreg(&cq->xdp_rxq);

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		if (cqe->sg.segs > 1) {
			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
			continue;
		}
		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
		put_page(virt_to_page(phys_to_virt(pa)));
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct sk_buff *skb = NULL;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;
	struct sg_list *sg;

	sq = &pfvf->qset.sq[cq->cint_idx];

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		sg = &sq->sg[cqe->comp.sqe_id];
		skb = (struct sk_buff *)sg->skb;
		if (skb) {
			otx2_dma_unmap_skb_frags(pfvf, sg);
			dev_kfree_skb_any(skb);
			sg->skb = (u64)NULL;
		}
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	else
		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

	if (!msg) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
				int len, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 *iova = NULL;

	sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
	sg->ld_type = NIX_SEND_LDTYPE_LDD;
	sg->subdc = NIX_SUBDC_SG;
	sg->segs = 1;
	sg->seg1_size = len;
	iova = (void *)sg + sizeof(*sg);
	*iova = dma_addr;
	*offset += sizeof(*sg) + sizeof(u64);

	sq->sg[sq->head].dma_addr[0] = dma_addr;
	sq->sg[sq->head].size[0] = len;
	sq->sg[sq->head].num_segs = 1;
}

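/* Queue a frame on an XDP send queue: a minimal SQE with just a
 * SEND_HDR and a single-segment SG is enough here.  'pnc' is set so
 * the completion lands in the XDP CQ, where otx2_xdp_snd_pkt_handler()
 * unmaps the buffer and releases the page.
 */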
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
{
	struct nix_sqe_hdr_s *sqe_hdr;
	struct otx2_snd_queue *sq;
	int offset, free_sqe;

	sq = &pfvf->qset.sq[qidx];
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
	if (free_sqe < sq->sqe_thresh)
		return false;

	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);

	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);

	if (!sqe_hdr->total) {
		sqe_hdr->aura = sq->aura_id;
		sqe_hdr->df = 1;
		sqe_hdr->sq = qidx;
		sqe_hdr->pnc = 1;
	}
	sqe_hdr->total = len;
	sqe_hdr->sqe_id = sq->head;

	offset = sizeof(*sqe_hdr);

	otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
	sqe_hdr->sizem1 = (offset / 16) - 1;
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}

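/* Run the attached XDP program on a received frame.  A 'true' return
 * tells the caller the CQE is fully consumed here (XDP_TX, XDP_REDIRECT
 * or XDP_DROP); 'false' lets the normal Rx path build an skb.  XDP_TX
 * bounces the still-mapped buffer out through this queue's XDP send
 * queue, XDP_REDIRECT and XDP_DROP unmap it, and all of these bump
 * cq->pool_ptrs so the Rx buffer pool gets refilled.
 */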
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq)
{
	unsigned char *hard_start, *data;
	int qidx = cq->cq_idx;
	struct xdp_buff xdp;
	struct page *page;
	u64 iova, pa;
	u32 act;
	int err;

	iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
	page = virt_to_page(phys_to_virt(pa));

	xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);

	data = (unsigned char *)phys_to_virt(pa);
	hard_start = page_address(page);
	xdp_prepare_buff(&xdp, hard_start, data - hard_start,
			 cqe->sg.seg_size, false);

	act = bpf_prog_run_xdp(prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		qidx += pfvf->hw.tx_queues;
		cq->pool_ptrs++;
		return otx2_xdp_sq_append_pkt(pfvf, iova,
					      cqe->sg.seg_size, qidx);
	case XDP_REDIRECT:
		cq->pool_ptrs++;
		err = xdp_do_redirect(pfvf->netdev, &xdp, prog);

		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		if (!err)
			return true;
		put_page(page);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		break;
	case XDP_ABORTED:
		trace_xdp_exception(pfvf->netdev, prog, act);
		break;
	case XDP_DROP:
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		put_page(page);
		cq->pool_ptrs++;
		return true;
	}
	return false;
}