/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = (PAGE_SIZE <= 4096) ?
		    PAGE_ALLOC_COSTLY_ORDER : 0;

	/* Check if request can be accommodated in the previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			nic->drv_stats.rcv_buffer_alloc_failures++;
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

ret:
	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
	nic->rb_page_offset += buf_len;

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Free SKBs */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Free SKB of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers.
 */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ?
		     0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable stripping of the first VLAN tag */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
			      *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size /
			 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs,
					  qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* Number of subdescriptors following this one */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		nic->drv_stats.tx_tso++;
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ?
	       skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

/* The CQE packs four u16 buffer lengths into each 64-bit word; on a
 * big-endian host those u16s appear in reverse order within the word,
 * so adjust the index within each group of four.
 */
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type,
		       int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If interrupt type is unknown, treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}