1 /* 2 * Copyright (C) 2015 Cavium, Inc. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of version 2 of the GNU General Public License 6 * as published by the Free Software Foundation. 7 */ 8 9 #include <linux/pci.h> 10 #include <linux/netdevice.h> 11 #include <linux/ip.h> 12 #include <linux/etherdevice.h> 13 #include <net/ip.h> 14 #include <net/tso.h> 15 16 #include "nic_reg.h" 17 #include "nic.h" 18 #include "q_struct.h" 19 #include "nicvf_queues.h" 20 21 static void nicvf_get_page(struct nicvf *nic) 22 { 23 if (!nic->rb_pageref || !nic->rb_page) 24 return; 25 26 page_ref_add(nic->rb_page, nic->rb_pageref); 27 nic->rb_pageref = 0; 28 } 29 30 /* Poll a register for a specific value */ 31 static int nicvf_poll_reg(struct nicvf *nic, int qidx, 32 u64 reg, int bit_pos, int bits, int val) 33 { 34 u64 bit_mask; 35 u64 reg_val; 36 int timeout = 10; 37 38 bit_mask = (1ULL << bits) - 1; 39 bit_mask = (bit_mask << bit_pos); 40 41 while (timeout) { 42 reg_val = nicvf_queue_reg_read(nic, reg, qidx); 43 if (((reg_val & bit_mask) >> bit_pos) == val) 44 return 0; 45 usleep_range(1000, 2000); 46 timeout--; 47 } 48 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); 49 return 1; 50 } 51 52 /* Allocate memory for a queue's descriptors */ 53 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, 54 int q_len, int desc_size, int align_bytes) 55 { 56 dmem->q_len = q_len; 57 dmem->size = (desc_size * q_len) + align_bytes; 58 /* Save address, need it while freeing */ 59 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 60 &dmem->dma, GFP_KERNEL); 61 if (!dmem->unalign_base) 62 return -ENOMEM; 63 64 /* Align memory address for 'align_bytes' */ 65 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); 66 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma); 67 return 0; 68 } 69 70 /* Free queue's descriptor memory */ 71 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) 72 { 73 if (!dmem) 74 return; 75 76 dma_free_coherent(&nic->pdev->dev, dmem->size, 77 dmem->unalign_base, dmem->dma); 78 dmem->unalign_base = NULL; 79 dmem->base = NULL; 80 } 81 82 /* Allocate buffer for packet reception 83 * HW returns memory address where packet is DMA'ed but not a pointer 84 * into RBDR ring, so save buffer address at the start of fragment and 85 * align the start address to a cache aligned address 86 */ 87 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, 88 u32 buf_len, u64 **rbuf) 89 { 90 int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0; 91 92 /* Check if request can be accomodated in previous allocated page */ 93 if (nic->rb_page && 94 ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) { 95 nic->rb_pageref++; 96 goto ret; 97 } 98 99 nicvf_get_page(nic); 100 nic->rb_page = NULL; 101 102 /* Allocate a new page */ 103 if (!nic->rb_page) { 104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 105 order); 106 if (!nic->rb_page) { 107 this_cpu_inc(nic->pnicvf->drv_stats-> 108 rcv_buffer_alloc_failures); 109 return -ENOMEM; 110 } 111 nic->rb_page_offset = 0; 112 } 113 114 ret: 115 *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset); 116 nic->rb_page_offset += buf_len; 117 118 return 0; 119 } 120 121 /* Build skb around receive buffer */ 122 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, 123 u64 rb_ptr, int len) 124 { 125 void *data; 126 struct sk_buff *skb; 127 128 data = phys_to_virt(rb_ptr); 129 130 /* Now build an skb to give to stack */ 131 skb = build_skb(data, RCV_FRAG_LEN); 132 if (!skb) { 133 put_page(virt_to_page(data)); 134 return NULL; 135 } 136 137 prefetch(skb->data); 138 return skb; 139 } 140 141 /* Allocate RBDR ring and populate receive buffers */ 142 static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, 143 int ring_len, int buf_size) 144 { 145 int idx; 146 u64 *rbuf; 147 struct rbdr_entry_t *desc; 148 int err; 149 150 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, 151 sizeof(struct rbdr_entry_t), 152 NICVF_RCV_BUF_ALIGN_BYTES); 153 if (err) 154 return err; 155 156 rbdr->desc = rbdr->dmem.base; 157 /* Buffer size has to be in multiples of 128 bytes */ 158 rbdr->dma_size = buf_size; 159 rbdr->enable = true; 160 rbdr->thresh = RBDR_THRESH; 161 162 nic->rb_page = NULL; 163 for (idx = 0; idx < ring_len; idx++) { 164 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, 165 &rbuf); 166 if (err) 167 return err; 168 169 desc = GET_RBDR_DESC(rbdr, idx); 170 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; 171 } 172 173 nicvf_get_page(nic); 174 175 return 0; 176 } 177 178 /* Free RBDR ring and its receive buffers */ 179 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) 180 { 181 int head, tail; 182 u64 buf_addr; 183 struct rbdr_entry_t *desc; 184 185 if (!rbdr) 186 return; 187 188 rbdr->enable = false; 189 if (!rbdr->dmem.base) 190 return; 191 192 head = rbdr->head; 193 tail = rbdr->tail; 194 195 /* Free SKBs */ 196 while (head != tail) { 197 desc = GET_RBDR_DESC(rbdr, head); 198 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 199 put_page(virt_to_page(phys_to_virt(buf_addr))); 200 head++; 201 head &= (rbdr->dmem.q_len - 1); 202 } 203 /* Free SKB of tail desc */ 204 desc = GET_RBDR_DESC(rbdr, tail); 205 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 206 put_page(virt_to_page(phys_to_virt(buf_addr))); 207 208 /* Free RBDR ring */ 209 nicvf_free_q_desc_mem(nic, &rbdr->dmem); 210 } 211 212 /* Refill receive buffer descriptors with new buffers. 213 */ 214 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) 215 { 216 struct queue_set *qs = nic->qs; 217 int rbdr_idx = qs->rbdr_cnt; 218 int tail, qcount; 219 int refill_rb_cnt; 220 struct rbdr *rbdr; 221 struct rbdr_entry_t *desc; 222 u64 *rbuf; 223 int new_rb = 0; 224 225 refill: 226 if (!rbdr_idx) 227 return; 228 rbdr_idx--; 229 rbdr = &qs->rbdr[rbdr_idx]; 230 /* Check if it's enabled */ 231 if (!rbdr->enable) 232 goto next_rbdr; 233 234 /* Get no of desc's to be refilled */ 235 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); 236 qcount &= 0x7FFFF; 237 /* Doorbell can be ringed with a max of ring size minus 1 */ 238 if (qcount >= (qs->rbdr_len - 1)) 239 goto next_rbdr; 240 else 241 refill_rb_cnt = qs->rbdr_len - qcount - 1; 242 243 /* Start filling descs from tail */ 244 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; 245 while (refill_rb_cnt) { 246 tail++; 247 tail &= (rbdr->dmem.q_len - 1); 248 249 if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) 250 break; 251 252 desc = GET_RBDR_DESC(rbdr, tail); 253 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; 254 refill_rb_cnt--; 255 new_rb++; 256 } 257 258 nicvf_get_page(nic); 259 260 /* make sure all memory stores are done before ringing doorbell */ 261 smp_wmb(); 262 263 /* Check if buffer allocation failed */ 264 if (refill_rb_cnt) 265 nic->rb_alloc_fail = true; 266 else 267 nic->rb_alloc_fail = false; 268 269 /* Notify HW */ 270 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, 271 rbdr_idx, new_rb); 272 next_rbdr: 273 /* Re-enable RBDR interrupts only if buffer allocation is success */ 274 if (!nic->rb_alloc_fail && rbdr->enable && 275 netif_running(nic->pnicvf->netdev)) 276 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 277 278 if (rbdr_idx) 279 goto refill; 280 } 281 282 /* Alloc rcv buffers in non-atomic mode for better success */ 283 void nicvf_rbdr_work(struct work_struct *work) 284 { 285 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); 286 287 nicvf_refill_rbdr(nic, GFP_KERNEL); 288 if (nic->rb_alloc_fail) 289 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); 290 else 291 nic->rb_work_scheduled = false; 292 } 293 294 /* In Softirq context, alloc rcv buffers in atomic mode */ 295 void nicvf_rbdr_task(unsigned long data) 296 { 297 struct nicvf *nic = (struct nicvf *)data; 298 299 nicvf_refill_rbdr(nic, GFP_ATOMIC); 300 if (nic->rb_alloc_fail) { 301 nic->rb_work_scheduled = true; 302 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); 303 } 304 } 305 306 /* Initialize completion queue */ 307 static int nicvf_init_cmp_queue(struct nicvf *nic, 308 struct cmp_queue *cq, int q_len) 309 { 310 int err; 311 312 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, 313 NICVF_CQ_BASE_ALIGN_BYTES); 314 if (err) 315 return err; 316 317 cq->desc = cq->dmem.base; 318 cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH; 319 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; 320 321 return 0; 322 } 323 324 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) 325 { 326 if (!cq) 327 return; 328 if (!cq->dmem.base) 329 return; 330 331 nicvf_free_q_desc_mem(nic, &cq->dmem); 332 } 333 334 /* Initialize transmit queue */ 335 static int nicvf_init_snd_queue(struct nicvf *nic, 336 struct snd_queue *sq, int q_len) 337 { 338 int err; 339 340 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, 341 NICVF_SQ_BASE_ALIGN_BYTES); 342 if (err) 343 return err; 344 345 sq->desc = sq->dmem.base; 346 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); 347 if (!sq->skbuff) 348 return -ENOMEM; 349 sq->head = 0; 350 sq->tail = 0; 351 atomic_set(&sq->free_cnt, q_len - 1); 352 sq->thresh = SND_QUEUE_THRESH; 353 354 /* Preallocate memory for TSO segment's header */ 355 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, 356 q_len * TSO_HEADER_SIZE, 357 &sq->tso_hdrs_phys, GFP_KERNEL); 358 if (!sq->tso_hdrs) 359 return -ENOMEM; 360 361 return 0; 362 } 363 364 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) 365 { 366 struct sk_buff *skb; 367 368 if (!sq) 369 return; 370 if (!sq->dmem.base) 371 return; 372 373 if (sq->tso_hdrs) 374 dma_free_coherent(&nic->pdev->dev, 375 sq->dmem.q_len * TSO_HEADER_SIZE, 376 sq->tso_hdrs, sq->tso_hdrs_phys); 377 378 /* Free pending skbs in the queue */ 379 smp_rmb(); 380 while (sq->head != sq->tail) { 381 skb = (struct sk_buff *)sq->skbuff[sq->head]; 382 if (skb) 383 dev_kfree_skb_any(skb); 384 sq->head++; 385 sq->head &= (sq->dmem.q_len - 1); 386 } 387 kfree(sq->skbuff); 388 nicvf_free_q_desc_mem(nic, &sq->dmem); 389 } 390 391 static void nicvf_reclaim_snd_queue(struct nicvf *nic, 392 struct queue_set *qs, int qidx) 393 { 394 /* Disable send queue */ 395 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); 396 /* Check if SQ is stopped */ 397 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) 398 return; 399 /* Reset send queue */ 400 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); 401 } 402 403 static void nicvf_reclaim_rcv_queue(struct nicvf *nic, 404 struct queue_set *qs, int qidx) 405 { 406 union nic_mbx mbx = {}; 407 408 /* Make sure all packets in the pipeline are written back into mem */ 409 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; 410 nicvf_send_msg_to_pf(nic, &mbx); 411 } 412 413 static void nicvf_reclaim_cmp_queue(struct nicvf *nic, 414 struct queue_set *qs, int qidx) 415 { 416 /* Disable timer threshold (doesn't get reset upon CQ reset */ 417 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); 418 /* Disable completion queue */ 419 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); 420 /* Reset completion queue */ 421 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 422 } 423 424 static void nicvf_reclaim_rbdr(struct nicvf *nic, 425 struct rbdr *rbdr, int qidx) 426 { 427 u64 tmp, fifo_state; 428 int timeout = 10; 429 430 /* Save head and tail pointers for feeing up buffers */ 431 rbdr->head = nicvf_queue_reg_read(nic, 432 NIC_QSET_RBDR_0_1_HEAD, 433 qidx) >> 3; 434 rbdr->tail = nicvf_queue_reg_read(nic, 435 NIC_QSET_RBDR_0_1_TAIL, 436 qidx) >> 3; 437 438 /* If RBDR FIFO is in 'FAIL' state then do a reset first 439 * before relaiming. 440 */ 441 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); 442 if (((fifo_state >> 62) & 0x03) == 0x3) 443 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 444 qidx, NICVF_RBDR_RESET); 445 446 /* Disable RBDR */ 447 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); 448 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 449 return; 450 while (1) { 451 tmp = nicvf_queue_reg_read(nic, 452 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, 453 qidx); 454 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) 455 break; 456 usleep_range(1000, 2000); 457 timeout--; 458 if (!timeout) { 459 netdev_err(nic->netdev, 460 "Failed polling on prefetch status\n"); 461 return; 462 } 463 } 464 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 465 qidx, NICVF_RBDR_RESET); 466 467 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) 468 return; 469 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); 470 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) 471 return; 472 } 473 474 void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features) 475 { 476 u64 rq_cfg; 477 int sqs; 478 479 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0); 480 481 /* Enable first VLAN stripping */ 482 if (features & NETIF_F_HW_VLAN_CTAG_RX) 483 rq_cfg |= (1ULL << 25); 484 else 485 rq_cfg &= ~(1ULL << 25); 486 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); 487 488 /* Configure Secondary Qsets, if any */ 489 for (sqs = 0; sqs < nic->sqs_count; sqs++) 490 if (nic->snicvf[sqs]) 491 nicvf_queue_reg_write(nic->snicvf[sqs], 492 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); 493 } 494 495 static void nicvf_reset_rcv_queue_stats(struct nicvf *nic) 496 { 497 union nic_mbx mbx = {}; 498 499 /* Reset all RQ/SQ and VF stats */ 500 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; 501 mbx.reset_stat.rx_stat_mask = 0x3FFF; 502 mbx.reset_stat.tx_stat_mask = 0x1F; 503 mbx.reset_stat.rq_stat_mask = 0xFFFF; 504 mbx.reset_stat.sq_stat_mask = 0xFFFF; 505 nicvf_send_msg_to_pf(nic, &mbx); 506 } 507 508 /* Configures receive queue */ 509 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, 510 int qidx, bool enable) 511 { 512 union nic_mbx mbx = {}; 513 struct rcv_queue *rq; 514 struct rq_cfg rq_cfg; 515 516 rq = &qs->rq[qidx]; 517 rq->enable = enable; 518 519 /* Disable receive queue */ 520 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); 521 522 if (!rq->enable) { 523 nicvf_reclaim_rcv_queue(nic, qs, qidx); 524 return; 525 } 526 527 rq->cq_qs = qs->vnic_id; 528 rq->cq_idx = qidx; 529 rq->start_rbdr_qs = qs->vnic_id; 530 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; 531 rq->cont_rbdr_qs = qs->vnic_id; 532 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; 533 /* all writes of RBDR data to be loaded into L2 Cache as well*/ 534 rq->caching = 1; 535 536 /* Send a mailbox msg to PF to config RQ */ 537 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; 538 mbx.rq.qs_num = qs->vnic_id; 539 mbx.rq.rq_num = qidx; 540 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | 541 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | 542 (rq->cont_qs_rbdr_idx << 8) | 543 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); 544 nicvf_send_msg_to_pf(nic, &mbx); 545 546 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; 547 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) | 548 (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) | 549 (qs->vnic_id << 0); 550 nicvf_send_msg_to_pf(nic, &mbx); 551 552 /* RQ drop config 553 * Enable CQ drop to reserve sufficient CQEs for all tx packets 554 */ 555 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; 556 mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) | 557 (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) | 558 (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8); 559 nicvf_send_msg_to_pf(nic, &mbx); 560 561 if (!nic->sqs_mode && (qidx == 0)) { 562 /* Enable checking L3/L4 length and TCP/UDP checksums */ 563 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 564 (BIT(24) | BIT(23) | BIT(21))); 565 nicvf_config_vlan_stripping(nic, nic->netdev->features); 566 } 567 568 /* Enable Receive queue */ 569 memset(&rq_cfg, 0, sizeof(struct rq_cfg)); 570 rq_cfg.ena = 1; 571 rq_cfg.tcp_ena = 0; 572 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); 573 } 574 575 /* Configures completion queue */ 576 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, 577 int qidx, bool enable) 578 { 579 struct cmp_queue *cq; 580 struct cq_cfg cq_cfg; 581 582 cq = &qs->cq[qidx]; 583 cq->enable = enable; 584 585 if (!cq->enable) { 586 nicvf_reclaim_cmp_queue(nic, qs, qidx); 587 return; 588 } 589 590 /* Reset completion queue */ 591 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); 592 593 if (!cq->enable) 594 return; 595 596 spin_lock_init(&cq->lock); 597 /* Set completion queue base address */ 598 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, 599 qidx, (u64)(cq->dmem.phys_base)); 600 601 /* Enable Completion queue */ 602 memset(&cq_cfg, 0, sizeof(struct cq_cfg)); 603 cq_cfg.ena = 1; 604 cq_cfg.reset = 0; 605 cq_cfg.caching = 0; 606 cq_cfg.qsize = ilog2(qs->cq_len >> 10); 607 cq_cfg.avg_con = 0; 608 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); 609 610 /* Set threshold value for interrupt generation */ 611 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); 612 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, 613 qidx, CMP_QUEUE_TIMER_THRESH); 614 } 615 616 /* Configures transmit queue */ 617 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, 618 int qidx, bool enable) 619 { 620 union nic_mbx mbx = {}; 621 struct snd_queue *sq; 622 struct sq_cfg sq_cfg; 623 624 sq = &qs->sq[qidx]; 625 sq->enable = enable; 626 627 if (!sq->enable) { 628 nicvf_reclaim_snd_queue(nic, qs, qidx); 629 return; 630 } 631 632 /* Reset send queue */ 633 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); 634 635 sq->cq_qs = qs->vnic_id; 636 sq->cq_idx = qidx; 637 638 /* Send a mailbox msg to PF to config SQ */ 639 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; 640 mbx.sq.qs_num = qs->vnic_id; 641 mbx.sq.sq_num = qidx; 642 mbx.sq.sqs_mode = nic->sqs_mode; 643 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; 644 nicvf_send_msg_to_pf(nic, &mbx); 645 646 /* Set queue base address */ 647 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, 648 qidx, (u64)(sq->dmem.phys_base)); 649 650 /* Enable send queue & set queue size */ 651 memset(&sq_cfg, 0, sizeof(struct sq_cfg)); 652 sq_cfg.ena = 1; 653 sq_cfg.reset = 0; 654 sq_cfg.ldwb = 0; 655 sq_cfg.qsize = ilog2(qs->sq_len >> 10); 656 sq_cfg.tstmp_bgx_intf = 0; 657 /* CQ's level at which HW will stop processing SQEs to avoid 658 * transmitting a pkt with no space in CQ to post CQE_TX. 659 */ 660 sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len; 661 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); 662 663 /* Set threshold value for interrupt generation */ 664 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); 665 666 /* Set queue:cpu affinity for better load distribution */ 667 if (cpu_online(qidx)) { 668 cpumask_set_cpu(qidx, &sq->affinity_mask); 669 netif_set_xps_queue(nic->netdev, 670 &sq->affinity_mask, qidx); 671 } 672 } 673 674 /* Configures receive buffer descriptor ring */ 675 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, 676 int qidx, bool enable) 677 { 678 struct rbdr *rbdr; 679 struct rbdr_cfg rbdr_cfg; 680 681 rbdr = &qs->rbdr[qidx]; 682 nicvf_reclaim_rbdr(nic, rbdr, qidx); 683 if (!enable) 684 return; 685 686 /* Set descriptor base address */ 687 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, 688 qidx, (u64)(rbdr->dmem.phys_base)); 689 690 /* Enable RBDR & set queue size */ 691 /* Buffer size should be in multiples of 128 bytes */ 692 memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg)); 693 rbdr_cfg.ena = 1; 694 rbdr_cfg.reset = 0; 695 rbdr_cfg.ldwb = 0; 696 rbdr_cfg.qsize = RBDR_SIZE; 697 rbdr_cfg.avg_con = 0; 698 rbdr_cfg.lines = rbdr->dma_size / 128; 699 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, 700 qidx, *(u64 *)&rbdr_cfg); 701 702 /* Notify HW */ 703 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, 704 qidx, qs->rbdr_len - 1); 705 706 /* Set threshold value for interrupt generation */ 707 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, 708 qidx, rbdr->thresh - 1); 709 } 710 711 /* Requests PF to assign and enable Qset */ 712 void nicvf_qset_config(struct nicvf *nic, bool enable) 713 { 714 union nic_mbx mbx = {}; 715 struct queue_set *qs = nic->qs; 716 struct qs_cfg *qs_cfg; 717 718 if (!qs) { 719 netdev_warn(nic->netdev, 720 "Qset is still not allocated, don't init queues\n"); 721 return; 722 } 723 724 qs->enable = enable; 725 qs->vnic_id = nic->vf_id; 726 727 /* Send a mailbox msg to PF to config Qset */ 728 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; 729 mbx.qs.num = qs->vnic_id; 730 mbx.qs.sqs_count = nic->sqs_count; 731 732 mbx.qs.cfg = 0; 733 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; 734 if (qs->enable) { 735 qs_cfg->ena = 1; 736 #ifdef __BIG_ENDIAN 737 qs_cfg->be = 1; 738 #endif 739 qs_cfg->vnic = qs->vnic_id; 740 } 741 nicvf_send_msg_to_pf(nic, &mbx); 742 } 743 744 static void nicvf_free_resources(struct nicvf *nic) 745 { 746 int qidx; 747 struct queue_set *qs = nic->qs; 748 749 /* Free receive buffer descriptor ring */ 750 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 751 nicvf_free_rbdr(nic, &qs->rbdr[qidx]); 752 753 /* Free completion queue */ 754 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 755 nicvf_free_cmp_queue(nic, &qs->cq[qidx]); 756 757 /* Free send queue */ 758 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 759 nicvf_free_snd_queue(nic, &qs->sq[qidx]); 760 } 761 762 static int nicvf_alloc_resources(struct nicvf *nic) 763 { 764 int qidx; 765 struct queue_set *qs = nic->qs; 766 767 /* Alloc receive buffer descriptor ring */ 768 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 769 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, 770 DMA_BUFFER_LEN)) 771 goto alloc_fail; 772 } 773 774 /* Alloc send queue */ 775 for (qidx = 0; qidx < qs->sq_cnt; qidx++) { 776 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) 777 goto alloc_fail; 778 } 779 780 /* Alloc completion queue */ 781 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { 782 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) 783 goto alloc_fail; 784 } 785 786 return 0; 787 alloc_fail: 788 nicvf_free_resources(nic); 789 return -ENOMEM; 790 } 791 792 int nicvf_set_qset_resources(struct nicvf *nic) 793 { 794 struct queue_set *qs; 795 796 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); 797 if (!qs) 798 return -ENOMEM; 799 nic->qs = qs; 800 801 /* Set count of each queue */ 802 qs->rbdr_cnt = DEFAULT_RBDR_CNT; 803 qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus()); 804 qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus()); 805 qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt); 806 807 /* Set queue lengths */ 808 qs->rbdr_len = RCV_BUF_COUNT; 809 qs->sq_len = SND_QUEUE_LEN; 810 qs->cq_len = CMP_QUEUE_LEN; 811 812 nic->rx_queues = qs->rq_cnt; 813 nic->tx_queues = qs->sq_cnt; 814 815 return 0; 816 } 817 818 int nicvf_config_data_transfer(struct nicvf *nic, bool enable) 819 { 820 bool disable = false; 821 struct queue_set *qs = nic->qs; 822 struct queue_set *pqs = nic->pnicvf->qs; 823 int qidx; 824 825 if (!qs) 826 return 0; 827 828 /* Take primary VF's queue lengths. 829 * This is needed to take queue lengths set from ethtool 830 * into consideration. 831 */ 832 if (nic->sqs_mode && pqs) { 833 qs->cq_len = pqs->cq_len; 834 qs->sq_len = pqs->sq_len; 835 } 836 837 if (enable) { 838 if (nicvf_alloc_resources(nic)) 839 return -ENOMEM; 840 841 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 842 nicvf_snd_queue_config(nic, qs, qidx, enable); 843 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 844 nicvf_cmp_queue_config(nic, qs, qidx, enable); 845 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 846 nicvf_rbdr_config(nic, qs, qidx, enable); 847 for (qidx = 0; qidx < qs->rq_cnt; qidx++) 848 nicvf_rcv_queue_config(nic, qs, qidx, enable); 849 } else { 850 for (qidx = 0; qidx < qs->rq_cnt; qidx++) 851 nicvf_rcv_queue_config(nic, qs, qidx, disable); 852 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 853 nicvf_rbdr_config(nic, qs, qidx, disable); 854 for (qidx = 0; qidx < qs->sq_cnt; qidx++) 855 nicvf_snd_queue_config(nic, qs, qidx, disable); 856 for (qidx = 0; qidx < qs->cq_cnt; qidx++) 857 nicvf_cmp_queue_config(nic, qs, qidx, disable); 858 859 nicvf_free_resources(nic); 860 } 861 862 /* Reset RXQ's stats. 863 * SQ's stats will get reset automatically once SQ is reset. 864 */ 865 nicvf_reset_rcv_queue_stats(nic); 866 867 return 0; 868 } 869 870 /* Get a free desc from SQ 871 * returns descriptor ponter & descriptor number 872 */ 873 static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) 874 { 875 int qentry; 876 877 qentry = sq->tail; 878 atomic_sub(desc_cnt, &sq->free_cnt); 879 sq->tail += desc_cnt; 880 sq->tail &= (sq->dmem.q_len - 1); 881 882 return qentry; 883 } 884 885 /* Free descriptor back to SQ for future use */ 886 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) 887 { 888 atomic_add(desc_cnt, &sq->free_cnt); 889 sq->head += desc_cnt; 890 sq->head &= (sq->dmem.q_len - 1); 891 } 892 893 static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) 894 { 895 qentry++; 896 qentry &= (sq->dmem.q_len - 1); 897 return qentry; 898 } 899 900 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) 901 { 902 u64 sq_cfg; 903 904 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 905 sq_cfg |= NICVF_SQ_EN; 906 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 907 /* Ring doorbell so that H/W restarts processing SQEs */ 908 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); 909 } 910 911 void nicvf_sq_disable(struct nicvf *nic, int qidx) 912 { 913 u64 sq_cfg; 914 915 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); 916 sq_cfg &= ~NICVF_SQ_EN; 917 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); 918 } 919 920 void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, 921 int qidx) 922 { 923 u64 head, tail; 924 struct sk_buff *skb; 925 struct nicvf *nic = netdev_priv(netdev); 926 struct sq_hdr_subdesc *hdr; 927 928 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; 929 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; 930 while (sq->head != head) { 931 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); 932 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { 933 nicvf_put_sq_desc(sq, 1); 934 continue; 935 } 936 skb = (struct sk_buff *)sq->skbuff[sq->head]; 937 if (skb) 938 dev_kfree_skb_any(skb); 939 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 940 atomic64_add(hdr->tot_len, 941 (atomic64_t *)&netdev->stats.tx_bytes); 942 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 943 } 944 } 945 946 /* Calculate no of SQ subdescriptors needed to transmit all 947 * segments of this TSO packet. 948 * Taken from 'Tilera network driver' with a minor modification. 949 */ 950 static int nicvf_tso_count_subdescs(struct sk_buff *skb) 951 { 952 struct skb_shared_info *sh = skb_shinfo(skb); 953 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 954 unsigned int data_len = skb->len - sh_len; 955 unsigned int p_len = sh->gso_size; 956 long f_id = -1; /* id of the current fragment */ 957 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ 958 long f_used = 0; /* bytes used from the current fragment */ 959 long n; /* size of the current piece of payload */ 960 int num_edescs = 0; 961 int segment; 962 963 for (segment = 0; segment < sh->gso_segs; segment++) { 964 unsigned int p_used = 0; 965 966 /* One edesc for header and for each piece of the payload. */ 967 for (num_edescs++; p_used < p_len; num_edescs++) { 968 /* Advance as needed. */ 969 while (f_used >= f_size) { 970 f_id++; 971 f_size = skb_frag_size(&sh->frags[f_id]); 972 f_used = 0; 973 } 974 975 /* Use bytes from the current fragment. */ 976 n = p_len - p_used; 977 if (n > f_size - f_used) 978 n = f_size - f_used; 979 f_used += n; 980 p_used += n; 981 } 982 983 /* The last segment may be less than gso_size. */ 984 data_len -= p_len; 985 if (data_len < p_len) 986 p_len = data_len; 987 } 988 989 /* '+ gso_segs' for SQ_HDR_SUDESCs for each segment */ 990 return num_edescs + sh->gso_segs; 991 } 992 993 #define POST_CQE_DESC_COUNT 2 994 995 /* Get the number of SQ descriptors needed to xmit this skb */ 996 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) 997 { 998 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; 999 1000 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) { 1001 subdesc_cnt = nicvf_tso_count_subdescs(skb); 1002 return subdesc_cnt; 1003 } 1004 1005 /* Dummy descriptors to get TSO pkt completion notification */ 1006 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) 1007 subdesc_cnt += POST_CQE_DESC_COUNT; 1008 1009 if (skb_shinfo(skb)->nr_frags) 1010 subdesc_cnt += skb_shinfo(skb)->nr_frags; 1011 1012 return subdesc_cnt; 1013 } 1014 1015 /* Add SQ HEADER subdescriptor. 1016 * First subdescriptor for every send descriptor. 1017 */ 1018 static inline void 1019 nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, 1020 int subdesc_cnt, struct sk_buff *skb, int len) 1021 { 1022 int proto; 1023 struct sq_hdr_subdesc *hdr; 1024 1025 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); 1026 memset(hdr, 0, SND_QUEUE_DESC_SIZE); 1027 hdr->subdesc_type = SQ_DESC_TYPE_HEADER; 1028 1029 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) { 1030 /* post_cqe = 0, to avoid HW posting a CQE for every TSO 1031 * segment transmitted on 88xx. 1032 */ 1033 hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT; 1034 } else { 1035 sq->skbuff[qentry] = (u64)skb; 1036 /* Enable notification via CQE after processing SQE */ 1037 hdr->post_cqe = 1; 1038 /* No of subdescriptors following this */ 1039 hdr->subdesc_cnt = subdesc_cnt; 1040 } 1041 hdr->tot_len = len; 1042 1043 /* Offload checksum calculation to HW */ 1044 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1045 hdr->csum_l3 = 1; /* Enable IP csum calculation */ 1046 hdr->l3_offset = skb_network_offset(skb); 1047 hdr->l4_offset = skb_transport_offset(skb); 1048 1049 proto = ip_hdr(skb)->protocol; 1050 switch (proto) { 1051 case IPPROTO_TCP: 1052 hdr->csum_l4 = SEND_L4_CSUM_TCP; 1053 break; 1054 case IPPROTO_UDP: 1055 hdr->csum_l4 = SEND_L4_CSUM_UDP; 1056 break; 1057 case IPPROTO_SCTP: 1058 hdr->csum_l4 = SEND_L4_CSUM_SCTP; 1059 break; 1060 } 1061 } 1062 1063 if (nic->hw_tso && skb_shinfo(skb)->gso_size) { 1064 hdr->tso = 1; 1065 hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb); 1066 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; 1067 /* For non-tunneled pkts, point this to L2 ethertype */ 1068 hdr->inner_l3_offset = skb_network_offset(skb) - 2; 1069 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); 1070 } 1071 } 1072 1073 /* SQ GATHER subdescriptor 1074 * Must follow HDR descriptor 1075 */ 1076 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, 1077 int size, u64 data) 1078 { 1079 struct sq_gather_subdesc *gather; 1080 1081 qentry &= (sq->dmem.q_len - 1); 1082 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); 1083 1084 memset(gather, 0, SND_QUEUE_DESC_SIZE); 1085 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 1086 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; 1087 gather->size = size; 1088 gather->addr = data; 1089 } 1090 1091 /* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO 1092 * packet so that a CQE is posted as a notifation for transmission of 1093 * TSO packet. 1094 */ 1095 static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry, 1096 int tso_sqe, struct sk_buff *skb) 1097 { 1098 struct sq_imm_subdesc *imm; 1099 struct sq_hdr_subdesc *hdr; 1100 1101 sq->skbuff[qentry] = (u64)skb; 1102 1103 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); 1104 memset(hdr, 0, SND_QUEUE_DESC_SIZE); 1105 hdr->subdesc_type = SQ_DESC_TYPE_HEADER; 1106 /* Enable notification via CQE after processing SQE */ 1107 hdr->post_cqe = 1; 1108 /* There is no packet to transmit here */ 1109 hdr->dont_send = 1; 1110 hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1; 1111 hdr->tot_len = 1; 1112 /* Actual TSO header SQE index, needed for cleanup */ 1113 hdr->rsvd2 = tso_sqe; 1114 1115 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1116 imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry); 1117 memset(imm, 0, SND_QUEUE_DESC_SIZE); 1118 imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE; 1119 imm->len = 1; 1120 } 1121 1122 static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb, 1123 int sq_num, int desc_cnt) 1124 { 1125 struct netdev_queue *txq; 1126 1127 txq = netdev_get_tx_queue(nic->pnicvf->netdev, 1128 skb_get_queue_mapping(skb)); 1129 1130 netdev_tx_sent_queue(txq, skb->len); 1131 1132 /* make sure all memory stores are done before ringing doorbell */ 1133 smp_wmb(); 1134 1135 /* Inform HW to xmit all TSO segments */ 1136 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1137 sq_num, desc_cnt); 1138 } 1139 1140 /* Segment a TSO packet into 'gso_size' segments and append 1141 * them to SQ for transfer 1142 */ 1143 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, 1144 int sq_num, int qentry, struct sk_buff *skb) 1145 { 1146 struct tso_t tso; 1147 int seg_subdescs = 0, desc_cnt = 0; 1148 int seg_len, total_len, data_left; 1149 int hdr_qentry = qentry; 1150 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1151 1152 tso_start(skb, &tso); 1153 total_len = skb->len - hdr_len; 1154 while (total_len > 0) { 1155 char *hdr; 1156 1157 /* Save Qentry for adding HDR_SUBDESC at the end */ 1158 hdr_qentry = qentry; 1159 1160 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 1161 total_len -= data_left; 1162 1163 /* Add segment's header */ 1164 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1165 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; 1166 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 1167 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, 1168 sq->tso_hdrs_phys + 1169 qentry * TSO_HEADER_SIZE); 1170 /* HDR_SUDESC + GATHER */ 1171 seg_subdescs = 2; 1172 seg_len = hdr_len; 1173 1174 /* Add segment's payload fragments */ 1175 while (data_left > 0) { 1176 int size; 1177 1178 size = min_t(int, tso.size, data_left); 1179 1180 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1181 nicvf_sq_add_gather_subdesc(sq, qentry, size, 1182 virt_to_phys(tso.data)); 1183 seg_subdescs++; 1184 seg_len += size; 1185 1186 data_left -= size; 1187 tso_build_data(skb, &tso, size); 1188 } 1189 nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry, 1190 seg_subdescs - 1, skb, seg_len); 1191 sq->skbuff[hdr_qentry] = (u64)NULL; 1192 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1193 1194 desc_cnt += seg_subdescs; 1195 } 1196 /* Save SKB in the last segment for freeing */ 1197 sq->skbuff[hdr_qentry] = (u64)skb; 1198 1199 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); 1200 1201 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); 1202 return 1; 1203 } 1204 1205 /* Append an skb to a SQ for packet transfer. */ 1206 int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, 1207 struct sk_buff *skb, u8 sq_num) 1208 { 1209 int i, size; 1210 int subdesc_cnt, tso_sqe = 0; 1211 int qentry; 1212 1213 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); 1214 if (subdesc_cnt > atomic_read(&sq->free_cnt)) 1215 goto append_fail; 1216 1217 qentry = nicvf_get_sq_desc(sq, subdesc_cnt); 1218 1219 /* Check if its a TSO packet */ 1220 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) 1221 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb); 1222 1223 /* Add SQ header subdesc */ 1224 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, 1225 skb, skb->len); 1226 tso_sqe = qentry; 1227 1228 /* Add SQ gather subdescs */ 1229 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1230 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 1231 nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); 1232 1233 /* Check for scattered buffer */ 1234 if (!skb_is_nonlinear(skb)) 1235 goto doorbell; 1236 1237 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1238 const struct skb_frag_struct *frag; 1239 1240 frag = &skb_shinfo(skb)->frags[i]; 1241 1242 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1243 size = skb_frag_size(frag); 1244 nicvf_sq_add_gather_subdesc(sq, qentry, size, 1245 virt_to_phys( 1246 skb_frag_address(frag))); 1247 } 1248 1249 doorbell: 1250 if (nic->t88 && skb_shinfo(skb)->gso_size) { 1251 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1252 nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb); 1253 } 1254 1255 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt); 1256 1257 return 1; 1258 1259 append_fail: 1260 /* Use original PCI dev for debug log */ 1261 nic = nic->pnicvf; 1262 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); 1263 return 0; 1264 } 1265 1266 static inline unsigned frag_num(unsigned i) 1267 { 1268 #ifdef __BIG_ENDIAN 1269 return (i & ~3) + 3 - (i & 3); 1270 #else 1271 return i; 1272 #endif 1273 } 1274 1275 /* Returns SKB for a received packet */ 1276 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) 1277 { 1278 int frag; 1279 int payload_len = 0; 1280 struct sk_buff *skb = NULL; 1281 struct page *page; 1282 int offset; 1283 u16 *rb_lens = NULL; 1284 u64 *rb_ptrs = NULL; 1285 1286 rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); 1287 /* Except 88xx pass1 on all other chips CQE_RX2_S is added to 1288 * CQE_RX at word6, hence buffer pointers move by word 1289 * 1290 * Use existing 'hw_tso' flag which will be set for all chips 1291 * except 88xx pass1 instead of a additional cache line 1292 * access (or miss) by using pci dev's revision. 1293 */ 1294 if (!nic->hw_tso) 1295 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); 1296 else 1297 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); 1298 1299 netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", 1300 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); 1301 1302 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { 1303 payload_len = rb_lens[frag_num(frag)]; 1304 if (!frag) { 1305 /* First fragment */ 1306 skb = nicvf_rb_ptr_to_skb(nic, 1307 *rb_ptrs - cqe_rx->align_pad, 1308 payload_len); 1309 if (!skb) 1310 return NULL; 1311 skb_reserve(skb, cqe_rx->align_pad); 1312 skb_put(skb, payload_len); 1313 } else { 1314 /* Add fragments */ 1315 page = virt_to_page(phys_to_virt(*rb_ptrs)); 1316 offset = phys_to_virt(*rb_ptrs) - page_address(page); 1317 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 1318 offset, payload_len, RCV_FRAG_LEN); 1319 } 1320 /* Next buffer pointer */ 1321 rb_ptrs++; 1322 } 1323 return skb; 1324 } 1325 1326 static u64 nicvf_int_type_to_mask(int int_type, int q_idx) 1327 { 1328 u64 reg_val; 1329 1330 switch (int_type) { 1331 case NICVF_INTR_CQ: 1332 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); 1333 break; 1334 case NICVF_INTR_SQ: 1335 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); 1336 break; 1337 case NICVF_INTR_RBDR: 1338 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); 1339 break; 1340 case NICVF_INTR_PKT_DROP: 1341 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); 1342 break; 1343 case NICVF_INTR_TCP_TIMER: 1344 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); 1345 break; 1346 case NICVF_INTR_MBOX: 1347 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); 1348 break; 1349 case NICVF_INTR_QS_ERR: 1350 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT); 1351 break; 1352 default: 1353 reg_val = 0; 1354 } 1355 1356 return reg_val; 1357 } 1358 1359 /* Enable interrupt */ 1360 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) 1361 { 1362 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); 1363 1364 if (!mask) { 1365 netdev_dbg(nic->netdev, 1366 "Failed to enable interrupt: unknown type\n"); 1367 return; 1368 } 1369 nicvf_reg_write(nic, NIC_VF_ENA_W1S, 1370 nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask); 1371 } 1372 1373 /* Disable interrupt */ 1374 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) 1375 { 1376 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); 1377 1378 if (!mask) { 1379 netdev_dbg(nic->netdev, 1380 "Failed to disable interrupt: unknown type\n"); 1381 return; 1382 } 1383 1384 nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask); 1385 } 1386 1387 /* Clear interrupt */ 1388 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) 1389 { 1390 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); 1391 1392 if (!mask) { 1393 netdev_dbg(nic->netdev, 1394 "Failed to clear interrupt: unknown type\n"); 1395 return; 1396 } 1397 1398 nicvf_reg_write(nic, NIC_VF_INT, mask); 1399 } 1400 1401 /* Check if interrupt is enabled */ 1402 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) 1403 { 1404 u64 mask = nicvf_int_type_to_mask(int_type, q_idx); 1405 /* If interrupt type is unknown, we treat it disabled. */ 1406 if (!mask) { 1407 netdev_dbg(nic->netdev, 1408 "Failed to check interrupt enable: unknown type\n"); 1409 return 0; 1410 } 1411 1412 return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S); 1413 } 1414 1415 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) 1416 { 1417 struct rcv_queue *rq; 1418 1419 #define GET_RQ_STATS(reg) \ 1420 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ 1421 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1422 1423 rq = &nic->qs->rq[rq_idx]; 1424 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); 1425 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); 1426 } 1427 1428 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) 1429 { 1430 struct snd_queue *sq; 1431 1432 #define GET_SQ_STATS(reg) \ 1433 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ 1434 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) 1435 1436 sq = &nic->qs->sq[sq_idx]; 1437 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); 1438 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); 1439 } 1440 1441 /* Check for errors in the receive cmp.queue entry */ 1442 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) 1443 { 1444 if (!cqe_rx->err_level && !cqe_rx->err_opcode) 1445 return 0; 1446 1447 if (netif_msg_rx_err(nic)) 1448 netdev_err(nic->netdev, 1449 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", 1450 nic->netdev->name, 1451 cqe_rx->err_level, cqe_rx->err_opcode); 1452 1453 switch (cqe_rx->err_opcode) { 1454 case CQ_RX_ERROP_RE_PARTIAL: 1455 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts); 1456 break; 1457 case CQ_RX_ERROP_RE_JABBER: 1458 this_cpu_inc(nic->drv_stats->rx_jabber_errs); 1459 break; 1460 case CQ_RX_ERROP_RE_FCS: 1461 this_cpu_inc(nic->drv_stats->rx_fcs_errs); 1462 break; 1463 case CQ_RX_ERROP_RE_RX_CTL: 1464 this_cpu_inc(nic->drv_stats->rx_bgx_errs); 1465 break; 1466 case CQ_RX_ERROP_PREL2_ERR: 1467 this_cpu_inc(nic->drv_stats->rx_prel2_errs); 1468 break; 1469 case CQ_RX_ERROP_L2_MAL: 1470 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed); 1471 break; 1472 case CQ_RX_ERROP_L2_OVERSIZE: 1473 this_cpu_inc(nic->drv_stats->rx_oversize); 1474 break; 1475 case CQ_RX_ERROP_L2_UNDERSIZE: 1476 this_cpu_inc(nic->drv_stats->rx_undersize); 1477 break; 1478 case CQ_RX_ERROP_L2_LENMISM: 1479 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch); 1480 break; 1481 case CQ_RX_ERROP_L2_PCLP: 1482 this_cpu_inc(nic->drv_stats->rx_l2_pclp); 1483 break; 1484 case CQ_RX_ERROP_IP_NOT: 1485 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs); 1486 break; 1487 case CQ_RX_ERROP_IP_CSUM_ERR: 1488 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs); 1489 break; 1490 case CQ_RX_ERROP_IP_MAL: 1491 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed); 1492 break; 1493 case CQ_RX_ERROP_IP_MALD: 1494 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed); 1495 break; 1496 case CQ_RX_ERROP_IP_HOP: 1497 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs); 1498 break; 1499 case CQ_RX_ERROP_L3_PCLP: 1500 this_cpu_inc(nic->drv_stats->rx_l3_pclp); 1501 break; 1502 case CQ_RX_ERROP_L4_MAL: 1503 this_cpu_inc(nic->drv_stats->rx_l4_malformed); 1504 break; 1505 case CQ_RX_ERROP_L4_CHK: 1506 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs); 1507 break; 1508 case CQ_RX_ERROP_UDP_LEN: 1509 this_cpu_inc(nic->drv_stats->rx_udp_len_errs); 1510 break; 1511 case CQ_RX_ERROP_L4_PORT: 1512 this_cpu_inc(nic->drv_stats->rx_l4_port_errs); 1513 break; 1514 case CQ_RX_ERROP_TCP_FLAG: 1515 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs); 1516 break; 1517 case CQ_RX_ERROP_TCP_OFFSET: 1518 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs); 1519 break; 1520 case CQ_RX_ERROP_L4_PCLP: 1521 this_cpu_inc(nic->drv_stats->rx_l4_pclp); 1522 break; 1523 case CQ_RX_ERROP_RBDR_TRUNC: 1524 this_cpu_inc(nic->drv_stats->rx_truncated_pkts); 1525 break; 1526 } 1527 1528 return 1; 1529 } 1530 1531 /* Check for errors in the send cmp.queue entry */ 1532 int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) 1533 { 1534 switch (cqe_tx->send_status) { 1535 case CQ_TX_ERROP_GOOD: 1536 return 0; 1537 case CQ_TX_ERROP_DESC_FAULT: 1538 this_cpu_inc(nic->drv_stats->tx_desc_fault); 1539 break; 1540 case CQ_TX_ERROP_HDR_CONS_ERR: 1541 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err); 1542 break; 1543 case CQ_TX_ERROP_SUBDC_ERR: 1544 this_cpu_inc(nic->drv_stats->tx_subdesc_err); 1545 break; 1546 case CQ_TX_ERROP_MAX_SIZE_VIOL: 1547 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded); 1548 break; 1549 case CQ_TX_ERROP_IMM_SIZE_OFLOW: 1550 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow); 1551 break; 1552 case CQ_TX_ERROP_DATA_SEQUENCE_ERR: 1553 this_cpu_inc(nic->drv_stats->tx_data_seq_err); 1554 break; 1555 case CQ_TX_ERROP_MEM_SEQUENCE_ERR: 1556 this_cpu_inc(nic->drv_stats->tx_mem_seq_err); 1557 break; 1558 case CQ_TX_ERROP_LOCK_VIOL: 1559 this_cpu_inc(nic->drv_stats->tx_lock_viol); 1560 break; 1561 case CQ_TX_ERROP_DATA_FAULT: 1562 this_cpu_inc(nic->drv_stats->tx_data_fault); 1563 break; 1564 case CQ_TX_ERROP_TSTMP_CONFLICT: 1565 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict); 1566 break; 1567 case CQ_TX_ERROP_TSTMP_TIMEOUT: 1568 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout); 1569 break; 1570 case CQ_TX_ERROP_MEM_FAULT: 1571 this_cpu_inc(nic->drv_stats->tx_mem_fault); 1572 break; 1573 case CQ_TX_ERROP_CK_OVERLAP: 1574 this_cpu_inc(nic->drv_stats->tx_csum_overlap); 1575 break; 1576 case CQ_TX_ERROP_CK_OFLOW: 1577 this_cpu_inc(nic->drv_stats->tx_csum_overflow); 1578 break; 1579 } 1580 1581 return 1; 1582 } 1583