/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 * function for a given opcode/subcode.
 * @param octeon_dev - the octeon device pointer.
 * @param opcode     - the opcode for which the dispatch argument
 *                     is to be checked.
 * @param subcode    - the subcode for which the dispatch argument
 *                     is to be checked.
 * @return Success: void * (argument to the dispatch function)
 * @return Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
					    u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}

/** Check for packets on Droq. This function should be called with lock held.
 * @param droq - Droq on which count is checked.
 * @return Returns the number of packets that arrived since the last check.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* we shall write to cnts at napi irq enable or end of droq tasklet */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
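
/* Worked example (illustrative values, not from the original source): with
 * a 2048-byte buffer_size the loop above iterates 32 times before count
 * reaches 64K, so a ring of 1024 descriptors ends up with
 * max_empty_descs = 1024 - 32 = 992.
 */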
static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;
		if (!pg_info)
			continue;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}

static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		desc_ring[i].info_ptr = 0;
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}

int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);

	return 0;
}
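
/* Note: octeon_delete_droq() also serves as the error-unwind path of
 * octeon_init_droq() below (see the init_droq_fail label).
 */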
int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else if (OCTEON_CN23XX_VF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->recv_buf_list = (struct octeon_recv_buffer *)
			      vzalloc_node(droq->max_count *
					   OCT_DROQ_RECVBUF_SIZE,
					   numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = (struct octeon_recv_buffer *)
				      vzalloc(droq->max_count *
					      OCT_DROQ_RECVBUF_SIZE);
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	spin_lock_init(&droq->lock);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}
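
/* Note the allocation pattern above: a NUMA-node-local vzalloc_node() is
 * tried first, with a plain vzalloc() as the fallback. octeon_create_droq()
 * below uses the same vmalloc_node()/vmalloc() fallback for the droq
 * structure itself.
 */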
/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >= droq->buffer_size) ?
			droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}
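
/* The buffers handed to recv_pkt above are unmapped and detached from the
 * ring (their recv_buf_list slots are set to NULL); octeon_droq_refill()
 * later allocates replacements for those slots.
 */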
/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;

	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx = incr_index(droq->refill_idx,
							      1,
							      droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	}			/* while */
	return desc_refilled;
}

/* octeon_droq_refill
 * Parameters:
 *  droq - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[
				     droq->refill_idx].buffer);

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero here, the pull-up pass did
	 * not change it: pass two only moves buffers to close the gap in the
	 * ring, so the same number of buffers still needs to be refilled.
	 */
	return desc_refilled;
}
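
/* Callers are expected to publish the refill to hardware by writing the
 * returned count to droq->pkts_credit_reg after a wmb(); see
 * octeon_droq_check_oom() and octeon_droq_fast_process_packets().
 */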
/** Check whether new receive buffers can be allocated to get a DROQ out of
 * an out-of-memory state.
 * @param droq - Droq being checked.
 * @return no return value
 */
void octeon_droq_check_oom(struct octeon_droq *droq)
{
	int desc_refilled;
	struct octeon_device *oct = droq->oct_dev;

	if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
		spin_lock_bh(&droq->lock);
		desc_refilled = octeon_droq_refill(oct, droq);
		if (desc_refilled) {
			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel(desc_refilled, droq->pkts_credit_reg);
			/* make sure mmio write completes */
			mmiowb();
		}
		spin_unlock_bh(&droq->lock);
	}
}

static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	return ((total_len + buf_size - 1) / buf_size);
}
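
/* Example: a 3000-byte packet received into 2048-byte buffers needs
 * (3000 + 2048 - 1) / 2048 = 2 descriptors.
 */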
static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}

static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length += OCTNET_FRM_LENGTH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
		droq->refill_count += buf_cnt;
	}
}

static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	struct octeon_droq_info *info;
	union octeon_rh *rh;
	u32 pkt, total_len = 0, pkt_count;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		rh = &info->rh;

		info->length += OCTNET_FRM_LENGTH_SIZE;
		rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[
							idx].buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel(desc_refilled, droq->pkts_credit_reg);
			/* make sure mmio write completes */
			mmiowb();
		}

	}			/* for (each packet)... */

	/* Update stats with the number of packets and bytes processed. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return pkts_to_process;
	}

	return pkt;
}
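
/* Note on the two receive paths above: a packet that fits in one buffer is
 * handed to the registered handler without copying (its page is recycled or
 * detached), while a multi-buffer packet is linearized into a fresh skb via
 * octeon_fast_packet_alloc()/octeon_fast_packet_next().
 */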
int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0, pkts_processed = 0;
	struct list_head *tmp, *tmp2;

	/* Grab the droq lock */
	spin_lock(&droq->lock);

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count) {
		spin_unlock(&droq->lock);
		return 0;
	}

	if (pkt_count > budget)
		pkt_count = budget;

	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

	atomic_sub(pkts_processed, &droq->pkts_pending);

	/* Release the spin lock */
	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets pending, schedule the tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}

/**
 * Utility function to poll for packets. check_hw_for_packets must be
 * called before calling this routine.
 */
static int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);

		total_pkts_processed += pkts_processed;
	}

	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}
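
/* Note: both processing paths above drain droq->dispatch_list only after
 * droq->lock has been released, so slow-path dispatch functions never run
 * with the DROQ lock held.
 */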
int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
			     u32 arg)
{
	struct octeon_droq *droq;

	droq = oct->droq[q_no];

	if (cmd == POLL_EVENT_PROCESS_PKTS)
		return octeon_droq_process_poll_pkts(oct, droq, arg);

	if (cmd == POLL_EVENT_PENDING_PKTS) {
		u32 pkt_cnt = atomic_read(&droq->pkts_pending);

		return octeon_droq_process_packets(oct, droq, pkt_cnt);
	}

	if (cmd == POLL_EVENT_ENABLE_INTR) {
		u32 value;
		unsigned long flags;

		/* Enable Pkt Interrupt */
		switch (oct->chip_id) {
		case OCTEON_CN66XX:
		case OCTEON_CN68XX: {
			struct octeon_cn6xxx *cn6xxx =
				(struct octeon_cn6xxx *)oct->chip;
			spin_lock_irqsave
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			value =
				octeon_read_csr(oct,
						CN6XXX_SLI_PKT_TIME_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_TIME_INT_ENB,
					 value);
			value =
				octeon_read_csr(oct,
						CN6XXX_SLI_PKT_CNT_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_CNT_INT_ENB,
					 value);

			/* don't bother flushing the enables */

			spin_unlock_irqrestore
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			return 0;
		}
		break;
		case OCTEON_CN23XX_PF_VID:
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
			break;

		case OCTEON_CN23XX_VF_VID:
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
			break;
		}
		return 0;
	}

	dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
	return -EINVAL;
}

int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_droq *droq;
	unsigned long flags;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!ops) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	spin_lock_irqsave(&droq->lock, flags);

	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}
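
/* Usage sketch (illustrative only; the real callers live elsewhere in the
 * driver): a NIC module fills in a handler so that
 * octeon_droq_fast_process_packets() can hand off network packets.
 *
 *	struct octeon_droq_ops ops = {
 *		.fptr = my_rx_handler,	// hypothetical callback
 *		.farg = my_arg,		// hypothetical handler argument
 *		.drop_on_max = 1,
 *	};
 *	octeon_register_droq_ops(oct, q_no, &ops);
 */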
int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	unsigned long flags;
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	spin_lock_irqsave(&droq->lock, flags);

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -1;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -1;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings */

	/* As of now not required, as settings are done for all 32 Droqs at
	 * the same time.
	 */
	return 0;
}