/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

struct rbuf_info {
	struct page *page;
	void	*data;
	u64	offset;
};

#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	u64 data;
	struct rbuf_info *rinfo;
	int order = get_order(buf_len);

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			netdev_err(nic->netdev,
				   "Failed to allocate new rcv buffer\n");
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;

	/* Align buffer addr to cache line, i.e. 128 bytes */
	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
	/* Save page address for reference count update */
	rinfo->page = nic->rb_page;
	/* Store start address for later retrieval */
	rinfo->data = (void *)data;
	/* Store alignment offset */
	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);

	data += rinfo->offset;

	/* Give next aligned address to hw for DMA */
	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
	return 0;
}
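/* Receive buffer layout produced by nicvf_alloc_rcv_buffer() above
 * (illustration, assuming NICVF_RCV_BUF_ALIGN_BYTES is the 128-byte
 * cache-line size mentioned in the comments):
 *
 *   data ........................................ raw start of the fragment
 *   data + offset ............................... cache-line aligned; the
 *                                                 struct rbuf_info is stored here
 *   data + offset + NICVF_RCV_BUF_ALIGN_BYTES ... address handed to HW for DMA
 *
 * GET_RBUF_INFO() simply steps back NICVF_RCV_BUF_ALIGN_BYTES from the
 * HW buffer address to recover the rbuf_info saved during allocation.
 */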
/* Retrieve actual buffer start address and build skb for received packet */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	struct sk_buff *skb;
	struct rbuf_info *rinfo;

	rb_ptr = (u64)phys_to_virt(rb_ptr);
	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(rinfo->page);
		return NULL;
	}

	/* Set correct skb->data */
	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);

	prefetch((void *)rb_ptr);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release buffer pages */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
		put_page(rinfo->page);
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer page of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
	put_page(rinfo->page);

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
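/* Refill accounting for nicvf_refill_rbdr() below (illustration): STATUS0
 * reports how many buffers the RBDR currently holds (qcount). Since the
 * doorbell may only be rung with at most ring size minus 1, the refill
 * target is rbdr_len - qcount - 1; e.g. with an 8192-entry ring and 5000
 * buffers still available, roughly 3191 new buffers get posted.
 */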
/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation is success */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}
/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}
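/* RQ_CFG mailbox word built in nicvf_rcv_queue_config() below (illustration
 * of the shift positions used there; field widths are only implied by the
 * neighbouring shifts):
 *   bit  26      - caching
 *   bits 19..25  - CQ qset
 *   bits 16..18  - CQ index
 *   bits  9..15  - continue RBDR qset
 *   bit   8      - continue RBDR index
 *   bits  1..7   - start RBDR qset
 *   bit   0      - start RBDR index
 */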
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, nic->cq_coalesce_usecs);
}
/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;
	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free descriptor from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}
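/* SQ ring bookkeeping (illustration): the '& (q_len - 1)' masking above and
 * below assumes q_len is a power of two, so indices simply wrap. E.g. with
 * q_len = 1024, tail = 1022 and desc_cnt = 4, the new tail is
 * (1022 + 4) & 1023 = 2. free_cnt starts at q_len - 1 so a completely full
 * ring is never confused with an empty one.
 */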
static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;    /* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;   /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}
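/* Subdescriptor budget (illustration): for a non-TSO skb,
 * nicvf_sq_subdesc_required() below reserves MIN_SQ_DESC_PER_PKT_XMIT
 * entries (covering the HEADER subdescriptor and the gather for the linear
 * data) plus one extra gather subdescriptor per page fragment; e.g. a
 * three-fragment skb needs MIN_SQ_DESC_PER_PKT_XMIT + 3 entries.
 */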
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->protocol != htons(ETH_P_IP))
			return;

		hdr->csum_l3 = 1;  /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      skb_get_queue_mapping(skb), desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}
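/* TSO layout on the SQ produced by nicvf_sq_append_tso() above
 * (illustration): each segment consists of a HEADER subdescriptor, written
 * into the segment's first slot (hdr_qentry) once its gathers have been
 * counted, a gather pointing at the per-segment header prebuilt in
 * sq->tso_hdrs, and one gather per payload chunk. Only the last segment's
 * HEADER entry keeps a pointer to the skb, so the skb is freed exactly once
 * after the whole burst completes.
 */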
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size)
		return nicvf_sq_append_tso(nic, sq, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}
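/* Interrupt enable/disable go through the ENA_W1S / ENA_W1C register pair;
 * the names suggest write-1-to-set / write-1-to-clear semantics, which is
 * why nicvf_disable_intr() below only writes the single bit it wants
 * cleared, leaving the other enable bits untouched.
 */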
/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;
	u64 mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		stats->rx.errop.good++;
		return 0;
	}

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_level) {
	case CQ_ERRLVL_MAC:
		stats->rx.errlvl.mac_errs++;
		break;
	case CQ_ERRLVL_L2:
		stats->rx.errlvl.l2_errs++;
		break;
	case CQ_ERRLVL_L3:
		stats->rx.errlvl.l3_errs++;
		break;
	case CQ_ERRLVL_L4:
		stats->rx.errlvl.l4_errs++;
		break;
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx.errop.partial_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx.errop.jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx.errop.fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_TERMINATE:
		stats->rx.errop.terminate_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx.errop.bgx_rx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx.errop.prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_FRAGMENT:
		stats->rx.errop.l2_frags++;
		break;
	case CQ_RX_ERROP_L2_OVERRUN:
		stats->rx.errop.l2_overruns++;
		break;
	case CQ_RX_ERROP_L2_PFCS:
		stats->rx.errop.l2_pfcs++;
		break;
	case CQ_RX_ERROP_L2_PUNY:
		stats->rx.errop.l2_puny++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx.errop.l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx.errop.l2_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx.errop.l2_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx.errop.l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx.errop.l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx.errop.non_ip++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx.errop.ip_csum_err++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx.errop.ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx.errop.ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx.errop.ip_hop_errs++;
		break;
	case CQ_RX_ERROP_L3_ICRC:
		stats->rx.errop.l3_icrc_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx.errop.l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx.errop.l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx.errop.l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx.errop.udp_len_err++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx.errop.bad_l4_port++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx.errop.bad_tcp_flag++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx.errop.tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx.errop.l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx.errop.pkt_truncated++;
		break;
	}

	return 1;
}
/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}