/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = (PAGE_SIZE <= 4096) ?
			PAGE_ALLOC_COSTLY_ORDER : 0;

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			nic->drv_stats.rcv_buffer_alloc_failures++;
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

ret:
	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
	nic->rb_page_offset += buf_len;

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release receive buffers (drop page references) */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers.
 */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ?
		     0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in the 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	/* Reset all RXQ's stats */
	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rq_stat_mask = 0xFFFF;
	nicvf_send_msg_to_pf(nic, &mbx);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG,
			      0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	/* Reset RXQ's stats.
	 * SQ's stats will get reset automatically once SQ is reset.
	 */
	nicvf_reset_rcv_queue_stats(nic);

	return 0;
}

/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	/* Dummy descriptors to get TSO pkt completion notification */
	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
		subdesc_cnt += POST_CQE_DESC_COUNT;

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;

	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
		 * segment transmitted on 88xx.
		 */
		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
	} else {
		sq->skbuff[qentry] = (u64)skb;
		/* Enable notification via CQE after processing SQE */
		hdr->post_cqe = 1;
		/* No of subdescriptors following this */
		hdr->subdesc_cnt = subdesc_cnt;
	}
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		nic->drv_stats.tx_tso++;
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
 * the TSO packet.
 */
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
					    int tso_sqe, struct sk_buff *skb)
{
	struct sq_imm_subdesc *imm;
	struct sq_hdr_subdesc *hdr;

	sq->skbuff[qentry] = (u64)skb;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* There is no packet to transmit here */
	hdr->dont_send = 1;
	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
	hdr->tot_len = 1;
	/* Actual TSO header SQE index, needed for cleanup */
	hdr->rsvd2 = tso_sqe;

	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(imm, 0, SND_QUEUE_DESC_SIZE);
	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
	imm->len = 1;
}

static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
				     int sq_num, int desc_cnt)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(nic->pnicvf->netdev,
				  skb_get_queue_mapping(skb));

	netdev_tx_sent_queue(txq, skb->len);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);

	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt, tso_sqe = 0;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);
	tso_sqe = qentry;

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	if (nic->t88 && skb_shinfo(skb)->gso_size) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
	}

	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);

	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct page *page;
	int offset;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	/* On all chips except 88xx pass1, CQE_RX2_S is added to
	 * CQE_RX at word6, hence the buffer pointers move by a word.
	 *
	 * Use the existing 'hw_tso' flag, which is set for all chips
	 * except 88xx pass1, instead of an additional cache line
	 * access (or miss) by using the pci dev's revision.
	 */
	if (!nic->hw_tso)
		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
	else
		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			page = virt_to_page(phys_to_virt(*rb_ptrs));
			offset = phys_to_virt(*rb_ptrs) - page_address(page);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
					offset, payload_len, RCV_FRAG_LEN);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If the interrupt type is unknown, treat it as disabled.
	 */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}