/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data);

static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}
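/* The descriptor area above is over-allocated by 'align_bytes' so the base
 * can be rounded up to the alignment the HW requires.  For example, with a
 * DMA address of 0x10000100 and 512-byte alignment, phys_base becomes
 * 0x10000200 and the CPU-visible 'base' moves up by the same 0x100 bytes;
 * the original unaligned address is kept only for dma_free_coherent().
 */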
#define XDP_PAGE_REFCNT_REFILL 256

/* Allocate a new page or recycle one if possible
 *
 * We cannot optimize dma mapping here, since
 * 1. It's only one RBDR ring for 8 Rx queues.
 * 2. CQE_RX gives address of the buffer where pkt has been DMA'ed
 *    and not idx into RBDR ring, so can't refer to saved info.
 * 3. There are multiple receive buffers per page
 */
static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
					       struct rbdr *rbdr, gfp_t gfp)
{
	int ref_count;
	struct page *page = NULL;
	struct pgcache *pgcache, *next;

	/* Check if page is already allocated */
	pgcache = &rbdr->pgcache[rbdr->pgidx];
	page = pgcache->page;
	/* Check if page can be recycled */
	if (page) {
		ref_count = page_ref_count(page);
		/* The page can be recycled if it has been used once, i.e.
		 * 'put_page' was called after packet transmission, so the
		 * internal ref_count and the page's ref_count are equal.
		 */
		if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
			pgcache->ref_count--;
		else
			page = NULL;

		/* In non-XDP mode, page's ref_count needs to be '1' for it
		 * to be recycled.
		 */
		if (!rbdr->is_xdp && (ref_count != 1))
			page = NULL;
	}

	if (!page) {
		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
		if (!page)
			return NULL;

		this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);

		/* Check for space */
		if (rbdr->pgalloc >= rbdr->pgcnt) {
			/* Page can still be used */
			nic->rb_page = page;
			return NULL;
		}

		/* Save the page in page cache */
		pgcache->page = page;
		pgcache->dma_addr = 0;
		pgcache->ref_count = 0;
		rbdr->pgalloc++;
	}

	/* Take additional page references for recycling */
	if (rbdr->is_xdp) {
		/* Since there is a single RBDR (i.e. a single core doing
		 * page recycling) per 8 Rx queues, in XDP mode adjusting
		 * page references atomically is the biggest bottleneck, so
		 * take a bunch of references at a time.
		 *
		 * So here, the below reference counts differ by '1'.
		 */
		if (!pgcache->ref_count) {
			pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
			page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
		}
	} else {
		/* In the non-XDP case, a single 64K page is divided across
		 * multiple receive buffers, so the cost of recycling is less
		 * anyway.  So we can do with just one extra reference.
		 */
		page_ref_add(page, 1);
	}

	rbdr->pgidx++;
	rbdr->pgidx &= (rbdr->pgcnt - 1);

	/* Prefetch refcount of next page in page cache */
	next = &rbdr->pgcache[rbdr->pgidx];
	page = next->page;
	if (page)
		prefetch(&page->_refcount);

	return pgcache;
}
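/* Worked example of the XDP refcount batching above: a fresh page starts
 * with page_ref_count == 1; page_ref_add(page, 256) makes it 257 while
 * pgcache->ref_count is 256, hence the "differ by '1'" note.  Each hand-out
 * decrements pgcache->ref_count and each consumed buffer does one
 * put_page(), so whenever page_ref_count() again equals pgcache->ref_count
 * no buffer is in flight and the page may be recycled.
 */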
/* Allocate buffer for packet reception */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
					 gfp_t gfp, u32 buf_len, u64 *rbuf)
{
	struct pgcache *pgcache = NULL;

	/* Check if request can be accommodated in the previously allocated
	 * page.  But in XDP mode only one buffer per page is permitted.
	 */
	if (!rbdr->is_xdp && nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Get new page, either recycled or new one */
	pgcache = nicvf_alloc_page(nic, rbdr, gfp);
	if (!pgcache && !nic->rb_page) {
		this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
		return -ENOMEM;
	}

	nic->rb_page_offset = 0;

	/* Reserve space for header modifications by BPF program */
	if (rbdr->is_xdp)
		buf_len += XDP_HEADROOM;

	/* Check if it's recycled */
	if (pgcache)
		nic->rb_page = pgcache->page;
ret:
	if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
		*rbuf = pgcache->dma_addr;
	} else {
		/* HW will ensure data coherency, CPU sync not required */
		*rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
						nic->rb_page_offset, buf_len,
						DMA_FROM_DEVICE,
						DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
			if (!nic->rb_page_offset)
				__free_pages(nic->rb_page, 0);
			nic->rb_page = NULL;
			return -ENOMEM;
		}

		if (pgcache)
			pgcache->dma_addr = *rbuf + XDP_HEADROOM;
		nic->rb_page_offset += buf_len;
	}

	return 0;
}
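/* In non-XDP mode several receive buffers are carved out of one page: with
 * 4K pages and RCV_FRAG_LEN of roughly 2K, two buffers share a page (many
 * more with 64K pages).  nic->rb_pageref counts buffers handed out from the
 * current page, and nicvf_get_page() later takes all those page references
 * with one page_ref_add() instead of one atomic op per buffer.
 */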
/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;
	rbdr->head = 0;
	rbdr->tail = 0;

	/* Initialize page recycling.
	 *
	 * Can't use a single buffer per page, especially with 64K pages.
	 * On embedded platforms, i.e. 81xx/83xx, available memory itself
	 * is low and the minimum ring size of RBDR is 8K, which takes away
	 * lots of memory.
	 *
	 * But for XDP it has to be a single buffer per page.
	 */
	if (!nic->pnicvf->xdp_prog) {
		rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
		rbdr->is_xdp = false;
	} else {
		rbdr->pgcnt = ring_len;
		rbdr->is_xdp = true;
	}
	rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
	rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
				rbdr->pgcnt, GFP_KERNEL);
	if (!rbdr->pgcache)
		return -ENOMEM;
	rbdr->pgidx = 0;
	rbdr->pgalloc = 0;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
					     RCV_FRAG_LEN, &rbuf);
		if (err) {
			/* To free already allocated and mapped ones */
			rbdr->tail = idx - 1;
			return err;
		}

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr, phys_addr;
	struct pgcache *pgcache;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr;
		phys_addr = nicvf_iova_to_phys(nic, buf_addr);
		dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		if (phys_addr)
			put_page(virt_to_page(phys_to_virt(phys_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr;
	phys_addr = nicvf_iova_to_phys(nic, buf_addr);
	dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (phys_addr)
		put_page(virt_to_page(phys_to_virt(phys_addr)));

	/* Sync page cache info */
	smp_rmb();

	/* Release additional page references held for recycling */
	head = 0;
	while (head < rbdr->pgcnt) {
		pgcache = &rbdr->pgcache[head];
		if (pgcache->page && page_ref_count(pgcache->page) != 0) {
			if (rbdr->is_xdp)
				page_ref_sub(pgcache->page,
					     pgcache->ref_count - 1);
			put_page(pgcache->page);
		}
		head++;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
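/* NIC_QSET_RBDR_0_1_HEAD/TAIL hold byte offsets into the ring; since each
 * struct rbdr_entry_t is 8 bytes, the reads below shift right by 3 to get
 * a descriptor index, e.g. a TAIL value of 0x40 means index 8.
 */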
/* Refill receive buffer descriptors with new buffers. */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Sync page cache info */
	smp_rmb();

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation was a success */
	if (!nic->rb_alloc_fail && rbdr->enable &&
	    netif_running(nic->pnicvf->netdev))
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}
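/* Refill runs first from the tasklet above with GFP_ATOMIC; if any
 * allocation fails it falls back to the workqueue, which may use
 * GFP_KERNEL and retries every 10ms until the ring is full again.
 * E.g. with an 8K ring and 8000 descriptors still queued, at most
 * 8192 - 8000 - 1 = 191 new buffers are posted in one pass.
 */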
/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len, int qidx)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;

	sq->head = 0;
	sq->tail = 0;
	sq->thresh = SND_QUEUE_THRESH;

	/* Check if this SQ is an XDP TX queue */
	if (nic->sqs_mode)
		qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
	if (qidx < nic->pnicvf->xdp_tx_queues) {
		/* Alloc memory to save page pointers for XDP_TX */
		sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
		if (!sq->xdp_page)
			return -ENOMEM;
		sq->xdp_desc_cnt = 0;
		sq->xdp_free_cnt = q_len - 1;
		sq->is_xdp = true;
	} else {
		sq->xdp_page = NULL;
		sq->xdp_desc_cnt = 0;
		sq->xdp_free_cnt = 0;
		sq->is_xdp = false;

		atomic_set(&sq->free_cnt, q_len - 1);

		/* Preallocate memory for TSO segment's header */
		sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
						  q_len * TSO_HEADER_SIZE,
						  &sq->tso_hdrs_phys,
						  GFP_KERNEL);
		if (!sq->tso_hdrs)
			return -ENOMEM;
	}

	return 0;
}
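/* Both free counters start at q_len - 1 rather than q_len: with head ==
 * tail meaning "empty", a completely full ring would be indistinguishable
 * from an empty one, so one slot is always left unused.
 */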
void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt)
{
	u8 idx;
	struct sq_gather_subdesc *gather;

	/* Unmap DMA mapped skb data buffers */
	for (idx = 0; idx < subdesc_cnt; idx++) {
		hdr_sqe++;
		hdr_sqe &= (sq->dmem.q_len - 1);
		gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
		/* HW will ensure data coherency, CPU sync not required */
		dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
				     gather->size, DMA_TO_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	}
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct sk_buff *skb;
	struct page *page;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	/* Free pending skbs in the queue */
	smp_rmb();
	while (sq->head != sq->tail) {
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (!skb || !sq->xdp_page)
			goto next;

		page = (struct page *)sq->xdp_page[sq->head];
		if (!page)
			goto next;
		else
			put_page(page);

		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and unmap them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, sq->head,
						 hdr->subdesc_cnt);
		}
		if (skb)
			dev_kfree_skb_any(skb);
next:
		sq->head++;
		sq->head &= (sq->dmem.q_len - 1);
	}
	kfree(sq->skbuff);
	kfree(sq->xdp_page);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If the RBDR FIFO is in the 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	/* Reset all RQ/SQ and VF stats */
	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = 0x3FFF;
	mbx.reset_stat.tx_stat_mask = 0x1F;
	mbx.reset_stat.rq_stat_mask = 0xFFFF;
	mbx.reset_stat.sq_stat_mask = 0xFFFF;
	nicvf_send_msg_to_pf(nic, &mbx);
}
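/* Receive queues are configured through the PF rather than directly: the
 * RQ configuration registers are under the PF's control, so the VF
 * describes the wanted setup in mailbox messages (NIC_MBOX_MSG_RQ_CFG and
 * friends below) and lets the PF program the hardware on its behalf.
 */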
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		xdp_rxq_info_unreg(&rq->xdp_rxq);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Driver has no proper error path for failed XDP RX-queue info reg */
	WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
		     (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	if (!nic->sqs_mode && (qidx == 0)) {
		/* Enable checking of L3/L4 lengths and TCP/UDP checksums.
		 * Also allow IPv6 pkts with zero UDP checksum.
		 */
		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
				      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
		nicvf_config_vlan_stripping(nic, nic->netdev->features);
	}

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}
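/* The mbx.rq.cfg word above packs the RQ's CQ and RBDR bindings into one
 * u64, e.g. with vnic_id 2, qidx 1 and rbdr_cnt 1:
 *   caching(1) << 26 | cq_qs(2) << 19 | cq_idx(1) << 16 |
 *   cont_rbdr_qs(2) << 9 | cont_qs_rbdr_idx(0) << 8 |
 *   start_rbdr_qs(2) << 1 | start_qs_rbdr_idx(0)
 */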
/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
	sq_cfg.tstmp_bgx_intf = 0;
	/* CQ's level at which HW will stop processing SQEs to avoid
	 * transmitting a pkt with no space in CQ to post CQE_TX.
	 */
	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}
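/* qsize is encoded as log2(entries / 1024): e.g. a 4096-entry queue is
 * programmed as ilog2(4096 >> 10) = 2.  cq_limit expresses the reserved
 * CQE headroom as a fraction of the CQ in 1/256 units, so the same
 * absolute CMP_QUEUE_PIPELINE_RSVD number of CQEs stays reserved no
 * matter what cq_len the user configures.
 */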
/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}
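/* One completion queue is shared by the RQ and SQ of the same index
 * (both set cq_idx = qidx above), so cq_cnt below is the max of rq_cnt
 * and sq_cnt; e.g. on a 4-core system with 8-queue HW limits,
 * rq_cnt = sq_cnt = cq_cnt = 4.
 */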
int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;
	nic->xdp_tx_queues = 0;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	struct queue_set *pqs = nic->pnicvf->qs;
	int qidx;

	if (!qs)
		return 0;

	/* Take primary VF's queue lengths.
	 * This is needed to take queue lengths set from ethtool
	 * into consideration.
	 */
	if (nic->sqs_mode && pqs) {
		qs->cq_len = pqs->cq_len;
		qs->sq_len = pqs->sq_len;
	}

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	/* Reset RXQ's stats.
	 * SQ's stats will get reset automatically once SQ is reset.
	 */
	nicvf_reset_rcv_queue_stats(nic);

	return 0;
}
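/* Note the teardown order above is the reverse of bring-up: RQs are
 * quiesced first so no new buffers are consumed, then the RBDRs and SQs,
 * and finally the CQs that both RQs and SQs post completions into.
 */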
/* Get a free descriptor from the SQ; returns the descriptor's index and
 * advances the tail past 'desc_cnt' reserved slots.
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	if (!sq->is_xdp)
		atomic_sub(desc_cnt, &sq->free_cnt);
	else
		sq->xdp_free_cnt -= desc_cnt;
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
					  int qentry, int desc_cnt)
{
	sq->tail = qentry;
	atomic_add(desc_cnt, &sq->free_cnt);
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	if (!sq->is_xdp)
		atomic_add(desc_cnt, &sq->free_cnt);
	else
		sq->xdp_free_cnt += desc_cnt;
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* XDP Transmit APIs */
void nicvf_xdp_sq_doorbell(struct nicvf *nic,
			   struct snd_queue *sq, int sq_num)
{
	if (!sq->xdp_desc_cnt)
		return;

	/* make sure all memory stores are done before ringing doorbell */
	wmb();

	/* Inform HW to xmit all queued XDP frames */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, sq->xdp_desc_cnt);
	sq->xdp_desc_cnt = 0;
}

static inline void
nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
			     int subdesc_cnt, u64 data, int len)
{
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;
	hdr->post_cqe = 1;
	sq->xdp_page[qentry] = (u64)virt_to_page((void *)data);
}
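/* An XDP_TX frame needs exactly MIN_SQ_DESC_PER_PKT_XMIT (2) subdescs:
 * one HEADER like the one built above and one GATHER pointing at the
 * already-DMA-mapped receive page, so no new mapping happens on this path.
 */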
int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
			    u64 bufaddr, u64 dma_addr, u16 len)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
	int qentry;

	if (subdesc_cnt > sq->xdp_free_cnt)
		return -1;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len);

	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr);

	sq->xdp_desc_cnt += subdesc_cnt;

	return 0;
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;		/* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	/* Dummy descriptors to get TSO pkt completion notification */
	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
		subdesc_cnt += POST_CQE_DESC_COUNT;

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;

	ip.hdr = skb_network_header(skb);
	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;

	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
		 * segment transmitted on 88xx.
		 */
		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
	} else {
		sq->skbuff[qentry] = (u64)skb;
		/* Enable notification via CQE after processing SQE */
		hdr->post_cqe = 1;
		/* No of subdescriptors following this */
		hdr->subdesc_cnt = subdesc_cnt;
	}
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (ip.v4->version == 4)
			hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = (ip.v4->version == 4) ? ip.v4->protocol :
			ip.v6->nexthdr;

		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	}
}
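/* For a typical non-TSO skb, the SQE chain is HEADER + one GATHER for the
 * linear data + one GATHER per page fragment; nicvf_sq_subdesc_required()
 * above returns e.g. 2 + 2 = 4 subdescs for an skb with two frags.  On
 * 88xx with HW TSO, two extra slots hold the dummy HEADER + IMMEDIATE
 * pair (see nicvf_sq_add_cqe_subdesc below) that posts the single CQE.
 */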
/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
 * the TSO packet.
 */
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
					    int tso_sqe, struct sk_buff *skb)
{
	struct sq_imm_subdesc *imm;
	struct sq_hdr_subdesc *hdr;

	sq->skbuff[qentry] = (u64)skb;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* There is no packet to transmit here */
	hdr->dont_send = 1;
	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
	hdr->tot_len = 1;
	/* Actual TSO header SQE index, needed for cleanup */
	hdr->rsvd2 = tso_sqe;

	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(imm, 0, SND_QUEUE_DESC_SIZE);
	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
	imm->len = 1;
}
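/* The resulting 88xx HW-TSO layout in the SQ is therefore:
 *   [HDR post_cqe=0][GATHER]...[GATHER][HDR dont_send=1, rsvd2=idx][IMM]
 * Only the trailing dummy HDR posts a CQE; its rsvd2 field carries the
 * index of the real HDR so cleanup can find and unmap the data buffers.
 */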
static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
				     int sq_num, int desc_cnt)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(nic->pnicvf->netdev,
				  skb_get_queue_mapping(skb));

	netdev_tx_sent_queue(txq, skb->len);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit the newly queued descriptors */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to the SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);

	this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	return 1;
}
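/* Software TSO above rebuilds a fresh Ethernet/IP/TCP header for every
 * segment in the preallocated sq->tso_hdrs DMA area, so each segment
 * costs HDR + GATHER(header) + one GATHER per payload piece.  Only the
 * last segment's HDR subdesc keeps the skb pointer, so the skb is freed
 * exactly once, when the final segment completes.
 */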
/* Append an skb to the SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num)
{
	int i, size;
	int subdesc_cnt, hdr_sqe = 0;
	int qentry;
	u64 dma_addr;

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);
	hdr_sqe = qentry;

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	/* HW will ensure data coherency, CPU sync not required */
	dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
				      offset_in_page(skb->data), size,
				      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
		nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
		return 0;
	}

	nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		dma_addr = dma_map_page_attrs(&nic->pdev->dev,
					      skb_frag_page(frag),
					      frag->page_offset, size,
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
			/* Free entire chain of mapped buffers
			 * here 'i' = frags mapped + above mapped skb->data
			 */
			nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
			nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
			return 0;
		}
		nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
	}

doorbell:
	if (nic->t88 && skb_shinfo(skb)->gso_size) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
	}

	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);

	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
				   u64 buf_addr, bool xdp)
{
	struct page *page = NULL;
	int len = RCV_FRAG_LEN;

	if (xdp) {
		page = virt_to_page(phys_to_virt(buf_addr));
		/* Check if it's a recycled page; if not,
		 * unmap the DMA mapping.
		 *
		 * A recycled page holds an extra reference.
		 */
		if (page_ref_count(page) != 1)
			return;

		len += XDP_HEADROOM;
		/* Receive buffers in XDP mode are mapped from page start */
		dma_addr &= PAGE_MASK;
	}
	dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
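/* A page still held by the XDP recycler carries references from the
 * XDP_PAGE_REFCNT_REFILL batch, so page_ref_count() != 1 above and the
 * mapping is kept alive for reuse via pgcache->dma_addr; only a page that
 * has fallen out of the recycler (refcount exactly 1) is really unmapped.
 */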
/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
				  struct cqe_rx_t *cqe_rx, bool xdp)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct page *page;
	int offset;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;
	u64 phys_addr;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	/* Except on 88xx pass1, on all other chips CQE_RX2_S is added to
	 * CQE_RX at word6, hence the buffer pointers move by one word.
	 *
	 * Use the existing 'hw_tso' flag, which is set on all chips except
	 * 88xx pass1, instead of taking an additional cache line access
	 * (or miss) by using the pci dev's revision.
	 */
	if (!nic->hw_tso)
		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
	else
		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
		if (!phys_addr) {
			if (skb)
				dev_kfree_skb_any(skb);
			return NULL;
		}

		if (!frag) {
			/* First fragment */
			nicvf_unmap_rcv_buffer(nic,
					       *rb_ptrs - cqe_rx->align_pad,
					       phys_addr, xdp);
			skb = nicvf_rb_ptr_to_skb(nic,
						  phys_addr - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
			page = virt_to_page(phys_to_virt(phys_addr));
			offset = phys_to_virt(phys_addr) - page_address(page);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
					offset, payload_len, RCV_FRAG_LEN);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}
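/* The W1S/W1C suffixes follow the usual "write 1 to set" / "write 1 to
 * clear" register convention: disabling above only needs the mask written
 * to NIC_VF_ENA_W1C, with no read-modify-write of the enable bits.
 */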
/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If the interrupt type is unknown, we treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	netif_err(nic, rx_err, nic->netdev,
		  "RX error CQE err_level 0x%x err_opcode 0x%x\n",
		  cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
		break;
	case CQ_RX_ERROP_RE_JABBER:
		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
		break;
	case CQ_RX_ERROP_RE_FCS:
		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
		break;
	case CQ_RX_ERROP_L2_MAL:
		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		this_cpu_inc(nic->drv_stats->rx_oversize);
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		this_cpu_inc(nic->drv_stats->rx_undersize);
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
		break;
	case CQ_RX_ERROP_L2_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
		break;
	case CQ_RX_ERROP_IP_NOT:
		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
		break;
	case CQ_RX_ERROP_IP_MAL:
		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
		break;
	case CQ_RX_ERROP_IP_MALD:
		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
		break;
	case CQ_RX_ERROP_IP_HOP:
		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
		break;
	case CQ_RX_ERROP_L3_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
		break;
	case CQ_RX_ERROP_L4_MAL:
		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
		break;
	case CQ_RX_ERROP_L4_CHK:
		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
		break;
	case CQ_RX_ERROP_UDP_LEN:
		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
		break;
	case CQ_RX_ERROP_L4_PORT:
		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
		break;
	case CQ_RX_ERROP_L4_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
		break;
	}

	return 1;
}
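/* nicvf_check_cqe_rx_errs() above and nicvf_check_cqe_tx_errs() below
 * always return 1, signalling an erroneous CQE to the caller after the
 * matching counter is bumped; the counters are per-CPU (this_cpu_inc) to
 * keep even the error path free of atomic operations.
 */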
/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_DESC_FAULT:
		this_cpu_inc(nic->drv_stats->tx_desc_fault);
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
		break;
	case CQ_TX_ERROP_MAX_SIZE_VIOL:
		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		this_cpu_inc(nic->drv_stats->tx_lock_viol);
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		this_cpu_inc(nic->drv_stats->tx_data_fault);
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		this_cpu_inc(nic->drv_stats->tx_mem_fault);
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
		break;
	}

	return 1;
}