1 /********************************************************************** 2 * Author: Cavium, Inc. 3 * 4 * Contact: support@cavium.com 5 * Please include "LiquidIO" in the subject. 6 * 7 * Copyright (c) 2003-2016 Cavium, Inc. 8 * 9 * This file is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License, Version 2, as 11 * published by the Free Software Foundation. 12 * 13 * This file is distributed in the hope that it will be useful, but 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 * NONINFRINGEMENT. See the GNU General Public License for more details. 17 ***********************************************************************/ 18 #include <linux/pci.h> 19 #include <linux/netdevice.h> 20 #include <linux/vmalloc.h> 21 #include "liquidio_common.h" 22 #include "octeon_droq.h" 23 #include "octeon_iq.h" 24 #include "response_manager.h" 25 #include "octeon_device.h" 26 #include "octeon_main.h" 27 #include "octeon_network.h" 28 #include "cn66xx_regs.h" 29 #include "cn66xx_device.h" 30 #include "cn23xx_pf_device.h" 31 #include "cn23xx_vf_device.h" 32 33 struct niclist { 34 struct list_head list; 35 void *ptr; 36 }; 37 38 struct __dispatch { 39 struct list_head list; 40 struct octeon_recv_info *rinfo; 41 octeon_dispatch_fn_t disp_fn; 42 }; 43 44 /** Get the argument that the user set when registering dispatch 45 * function for a given opcode/subcode. 46 * @param octeon_dev - the octeon device pointer. 47 * @param opcode - the opcode for which the dispatch argument 48 * is to be checked. 49 * @param subcode - the subcode for which the dispatch argument 50 * is to be checked. 51 * @return Success: void * (argument to the dispatch function) 52 * @return Failure: NULL 53 * 54 */ 55 static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev, 56 u16 opcode, u16 subcode) 57 { 58 int idx; 59 struct list_head *dispatch; 60 void *fn_arg = NULL; 61 u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); 62 63 idx = combined_opcode & OCTEON_OPCODE_MASK; 64 65 spin_lock_bh(&octeon_dev->dispatch.lock); 66 67 if (octeon_dev->dispatch.count == 0) { 68 spin_unlock_bh(&octeon_dev->dispatch.lock); 69 return NULL; 70 } 71 72 if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { 73 fn_arg = octeon_dev->dispatch.dlist[idx].arg; 74 } else { 75 list_for_each(dispatch, 76 &octeon_dev->dispatch.dlist[idx].list) { 77 if (((struct octeon_dispatch *)dispatch)->opcode == 78 combined_opcode) { 79 fn_arg = ((struct octeon_dispatch *) 80 dispatch)->arg; 81 break; 82 } 83 } 84 } 85 86 spin_unlock_bh(&octeon_dev->dispatch.lock); 87 return fn_arg; 88 } 89 90 /** Check for packets on Droq. This function should be called with lock held. 91 * @param droq - Droq on which count is checked. 92 * @return Returns packet count. 93 */ 94 u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq) 95 { 96 u32 pkt_count = 0; 97 u32 last_count; 98 99 pkt_count = readl(droq->pkts_sent_reg); 100 101 last_count = pkt_count - droq->pkt_count; 102 droq->pkt_count = pkt_count; 103 104 /* we shall write to cnts at napi irq enable or end of droq tasklet */ 105 if (last_count) 106 atomic_add(last_count, &droq->pkts_pending); 107 108 return last_count; 109 } 110 111 static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq) 112 { 113 u32 count = 0; 114 115 /* max_empty_descs is the max. no. of descs that can have no buffers. 116 * If the empty desc count goes beyond this value, we cannot safely 117 * read in a 64K packet sent by Octeon 118 * (64K is max pkt size from Octeon) 119 */ 120 droq->max_empty_descs = 0; 121 122 do { 123 droq->max_empty_descs++; 124 count += droq->buffer_size; 125 } while (count < (64 * 1024)); 126 127 droq->max_empty_descs = droq->max_count - droq->max_empty_descs; 128 } 129 130 static void octeon_droq_reset_indices(struct octeon_droq *droq) 131 { 132 droq->read_idx = 0; 133 droq->write_idx = 0; 134 droq->refill_idx = 0; 135 droq->refill_count = 0; 136 atomic_set(&droq->pkts_pending, 0); 137 } 138 139 static void 140 octeon_droq_destroy_ring_buffers(struct octeon_device *oct, 141 struct octeon_droq *droq) 142 { 143 u32 i; 144 struct octeon_skb_page_info *pg_info; 145 146 for (i = 0; i < droq->max_count; i++) { 147 pg_info = &droq->recv_buf_list[i].pg_info; 148 149 if (pg_info->dma) 150 lio_unmap_ring(oct->pci_dev, 151 (u64)pg_info->dma); 152 pg_info->dma = 0; 153 154 if (pg_info->page) 155 recv_buffer_destroy(droq->recv_buf_list[i].buffer, 156 pg_info); 157 158 if (droq->desc_ring && droq->desc_ring[i].info_ptr) 159 lio_unmap_ring_info(oct->pci_dev, 160 (u64)droq-> 161 desc_ring[i].info_ptr, 162 OCT_DROQ_INFO_SIZE); 163 droq->recv_buf_list[i].buffer = NULL; 164 } 165 166 octeon_droq_reset_indices(droq); 167 } 168 169 static int 170 octeon_droq_setup_ring_buffers(struct octeon_device *oct, 171 struct octeon_droq *droq) 172 { 173 u32 i; 174 void *buf; 175 struct octeon_droq_desc *desc_ring = droq->desc_ring; 176 177 for (i = 0; i < droq->max_count; i++) { 178 buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info); 179 180 if (!buf) { 181 dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", 182 __func__); 183 droq->stats.rx_alloc_failure++; 184 return -ENOMEM; 185 } 186 187 droq->recv_buf_list[i].buffer = buf; 188 droq->recv_buf_list[i].data = get_rbd(buf); 189 droq->info_list[i].length = 0; 190 191 /* map ring buffers into memory */ 192 desc_ring[i].info_ptr = lio_map_ring_info(droq, i); 193 desc_ring[i].buffer_ptr = 194 lio_map_ring(droq->recv_buf_list[i].buffer); 195 } 196 197 octeon_droq_reset_indices(droq); 198 199 octeon_droq_compute_max_packet_bufs(droq); 200 201 return 0; 202 } 203 204 int octeon_delete_droq(struct octeon_device *oct, u32 q_no) 205 { 206 struct octeon_droq *droq = oct->droq[q_no]; 207 208 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); 209 210 octeon_droq_destroy_ring_buffers(oct, droq); 211 vfree(droq->recv_buf_list); 212 213 if (droq->info_base_addr) 214 cnnic_free_aligned_dma(oct->pci_dev, droq->info_list, 215 droq->info_alloc_size, 216 droq->info_base_addr, 217 droq->info_list_dma); 218 219 if (droq->desc_ring) 220 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), 221 droq->desc_ring, droq->desc_ring_dma); 222 223 memset(droq, 0, OCT_DROQ_SIZE); 224 225 return 0; 226 } 227 228 int octeon_init_droq(struct octeon_device *oct, 229 u32 q_no, 230 u32 num_descs, 231 u32 desc_size, 232 void *app_ctx) 233 { 234 struct octeon_droq *droq; 235 u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; 236 u32 c_pkts_per_intr = 0, c_refill_threshold = 0; 237 int numa_node = dev_to_node(&oct->pci_dev->dev); 238 239 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); 240 241 droq = oct->droq[q_no]; 242 memset(droq, 0, OCT_DROQ_SIZE); 243 244 droq->oct_dev = oct; 245 droq->q_no = q_no; 246 if (app_ctx) 247 droq->app_ctx = app_ctx; 248 else 249 droq->app_ctx = (void *)(size_t)q_no; 250 251 c_num_descs = num_descs; 252 c_buf_size = desc_size; 253 if (OCTEON_CN6XXX(oct)) { 254 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); 255 256 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); 257 c_refill_threshold = 258 (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); 259 } else if (OCTEON_CN23XX_PF(oct)) { 260 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); 261 262 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23); 263 c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23); 264 } else if (OCTEON_CN23XX_VF(oct)) { 265 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf); 266 267 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23); 268 c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23); 269 } else { 270 return 1; 271 } 272 273 droq->max_count = c_num_descs; 274 droq->buffer_size = c_buf_size; 275 276 desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; 277 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, 278 (dma_addr_t *)&droq->desc_ring_dma); 279 280 if (!droq->desc_ring) { 281 dev_err(&oct->pci_dev->dev, 282 "Output queue %d ring alloc failed\n", q_no); 283 return 1; 284 } 285 286 dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n", 287 q_no, droq->desc_ring, droq->desc_ring_dma); 288 dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no, 289 droq->max_count); 290 291 droq->info_list = 292 cnnic_numa_alloc_aligned_dma((droq->max_count * 293 OCT_DROQ_INFO_SIZE), 294 &droq->info_alloc_size, 295 &droq->info_base_addr, 296 numa_node); 297 if (!droq->info_list) { 298 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); 299 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), 300 droq->desc_ring, droq->desc_ring_dma); 301 return 1; 302 } 303 304 droq->recv_buf_list = (struct octeon_recv_buffer *) 305 vmalloc_node(droq->max_count * 306 OCT_DROQ_RECVBUF_SIZE, 307 numa_node); 308 if (!droq->recv_buf_list) 309 droq->recv_buf_list = (struct octeon_recv_buffer *) 310 vmalloc(droq->max_count * 311 OCT_DROQ_RECVBUF_SIZE); 312 if (!droq->recv_buf_list) { 313 dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); 314 goto init_droq_fail; 315 } 316 317 if (octeon_droq_setup_ring_buffers(oct, droq)) 318 goto init_droq_fail; 319 320 droq->pkts_per_intr = c_pkts_per_intr; 321 droq->refill_threshold = c_refill_threshold; 322 323 dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n", 324 droq->max_empty_descs); 325 326 spin_lock_init(&droq->lock); 327 328 INIT_LIST_HEAD(&droq->dispatch_list); 329 330 /* For 56xx Pass1, this function won't be called, so no checks. */ 331 oct->fn_list.setup_oq_regs(oct, q_no); 332 333 oct->io_qmask.oq |= BIT_ULL(q_no); 334 335 return 0; 336 337 init_droq_fail: 338 octeon_delete_droq(oct, q_no); 339 return 1; 340 } 341 342 /* octeon_create_recv_info 343 * Parameters: 344 * octeon_dev - pointer to the octeon device structure 345 * droq - droq in which the packet arrived. 346 * buf_cnt - no. of buffers used by the packet. 347 * idx - index in the descriptor for the first buffer in the packet. 348 * Description: 349 * Allocates a recv_info_t and copies the buffer addresses for packet data 350 * into the recv_pkt space which starts at an 8B offset from recv_info_t. 351 * Flags the descriptors for refill later. If available descriptors go 352 * below the threshold to receive a 64K pkt, new buffers are first allocated 353 * before the recv_pkt_t is created. 354 * This routine will be called in interrupt context. 355 * Returns: 356 * Success: Pointer to recv_info_t 357 * Failure: NULL. 358 * Locks: 359 * The droq->lock is held when this routine is called. 360 */ 361 static inline struct octeon_recv_info *octeon_create_recv_info( 362 struct octeon_device *octeon_dev, 363 struct octeon_droq *droq, 364 u32 buf_cnt, 365 u32 idx) 366 { 367 struct octeon_droq_info *info; 368 struct octeon_recv_pkt *recv_pkt; 369 struct octeon_recv_info *recv_info; 370 u32 i, bytes_left; 371 struct octeon_skb_page_info *pg_info; 372 373 info = &droq->info_list[idx]; 374 375 recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch)); 376 if (!recv_info) 377 return NULL; 378 379 recv_pkt = recv_info->recv_pkt; 380 recv_pkt->rh = info->rh; 381 recv_pkt->length = (u32)info->length; 382 recv_pkt->buffer_count = (u16)buf_cnt; 383 recv_pkt->octeon_id = (u16)octeon_dev->octeon_id; 384 385 i = 0; 386 bytes_left = (u32)info->length; 387 388 while (buf_cnt) { 389 { 390 pg_info = &droq->recv_buf_list[idx].pg_info; 391 392 lio_unmap_ring(octeon_dev->pci_dev, 393 (u64)pg_info->dma); 394 pg_info->page = NULL; 395 pg_info->dma = 0; 396 } 397 398 recv_pkt->buffer_size[i] = 399 (bytes_left >= 400 droq->buffer_size) ? droq->buffer_size : bytes_left; 401 402 recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer; 403 droq->recv_buf_list[idx].buffer = NULL; 404 405 idx = incr_index(idx, 1, droq->max_count); 406 bytes_left -= droq->buffer_size; 407 i++; 408 buf_cnt--; 409 } 410 411 return recv_info; 412 } 413 414 /* If we were not able to refill all buffers, try to move around 415 * the buffers that were not dispatched. 416 */ 417 static inline u32 418 octeon_droq_refill_pullup_descs(struct octeon_droq *droq, 419 struct octeon_droq_desc *desc_ring) 420 { 421 u32 desc_refilled = 0; 422 423 u32 refill_index = droq->refill_idx; 424 425 while (refill_index != droq->read_idx) { 426 if (droq->recv_buf_list[refill_index].buffer) { 427 droq->recv_buf_list[droq->refill_idx].buffer = 428 droq->recv_buf_list[refill_index].buffer; 429 droq->recv_buf_list[droq->refill_idx].data = 430 droq->recv_buf_list[refill_index].data; 431 desc_ring[droq->refill_idx].buffer_ptr = 432 desc_ring[refill_index].buffer_ptr; 433 droq->recv_buf_list[refill_index].buffer = NULL; 434 desc_ring[refill_index].buffer_ptr = 0; 435 do { 436 droq->refill_idx = incr_index(droq->refill_idx, 437 1, 438 droq->max_count); 439 desc_refilled++; 440 droq->refill_count--; 441 } while (droq->recv_buf_list[droq->refill_idx]. 442 buffer); 443 } 444 refill_index = incr_index(refill_index, 1, droq->max_count); 445 } /* while */ 446 return desc_refilled; 447 } 448 449 /* octeon_droq_refill 450 * Parameters: 451 * droq - droq in which descriptors require new buffers. 452 * Description: 453 * Called during normal DROQ processing in interrupt mode or by the poll 454 * thread to refill the descriptors from which buffers were dispatched 455 * to upper layers. Attempts to allocate new buffers. If that fails, moves 456 * up buffers (that were not dispatched) to form a contiguous ring. 457 * Returns: 458 * No of descriptors refilled. 459 * Locks: 460 * This routine is called with droq->lock held. 461 */ 462 static u32 463 octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) 464 { 465 struct octeon_droq_desc *desc_ring; 466 void *buf = NULL; 467 u8 *data; 468 u32 desc_refilled = 0; 469 struct octeon_skb_page_info *pg_info; 470 471 desc_ring = droq->desc_ring; 472 473 while (droq->refill_count && (desc_refilled < droq->max_count)) { 474 /* If a valid buffer exists (happens if there is no dispatch), 475 * reuse 476 * the buffer, else allocate. 477 */ 478 if (!droq->recv_buf_list[droq->refill_idx].buffer) { 479 pg_info = 480 &droq->recv_buf_list[droq->refill_idx].pg_info; 481 /* Either recycle the existing pages or go for 482 * new page alloc 483 */ 484 if (pg_info->page) 485 buf = recv_buffer_reuse(octeon_dev, pg_info); 486 else 487 buf = recv_buffer_alloc(octeon_dev, pg_info); 488 /* If a buffer could not be allocated, no point in 489 * continuing 490 */ 491 if (!buf) { 492 droq->stats.rx_alloc_failure++; 493 break; 494 } 495 droq->recv_buf_list[droq->refill_idx].buffer = 496 buf; 497 data = get_rbd(buf); 498 } else { 499 data = get_rbd(droq->recv_buf_list 500 [droq->refill_idx].buffer); 501 } 502 503 droq->recv_buf_list[droq->refill_idx].data = data; 504 505 desc_ring[droq->refill_idx].buffer_ptr = 506 lio_map_ring(droq->recv_buf_list[droq-> 507 refill_idx].buffer); 508 /* Reset any previous values in the length field. */ 509 droq->info_list[droq->refill_idx].length = 0; 510 511 droq->refill_idx = incr_index(droq->refill_idx, 1, 512 droq->max_count); 513 desc_refilled++; 514 droq->refill_count--; 515 } 516 517 if (droq->refill_count) 518 desc_refilled += 519 octeon_droq_refill_pullup_descs(droq, desc_ring); 520 521 /* if droq->refill_count 522 * The refill count would not change in pass two. We only moved buffers 523 * to close the gap in the ring, but we would still have the same no. of 524 * buffers to refill. 525 */ 526 return desc_refilled; 527 } 528 529 static inline u32 530 octeon_droq_get_bufcount(u32 buf_size, u32 total_len) 531 { 532 u32 buf_cnt = 0; 533 534 while (total_len > (buf_size * buf_cnt)) 535 buf_cnt++; 536 return buf_cnt; 537 } 538 539 static int 540 octeon_droq_dispatch_pkt(struct octeon_device *oct, 541 struct octeon_droq *droq, 542 union octeon_rh *rh, 543 struct octeon_droq_info *info) 544 { 545 u32 cnt; 546 octeon_dispatch_fn_t disp_fn; 547 struct octeon_recv_info *rinfo; 548 549 cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length); 550 551 disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode, 552 (u16)rh->r.subcode); 553 if (disp_fn) { 554 rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx); 555 if (rinfo) { 556 struct __dispatch *rdisp = rinfo->rsvd; 557 558 rdisp->rinfo = rinfo; 559 rdisp->disp_fn = disp_fn; 560 rinfo->recv_pkt->rh = *rh; 561 list_add_tail(&rdisp->list, 562 &droq->dispatch_list); 563 } else { 564 droq->stats.dropped_nomem++; 565 } 566 } else { 567 dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n", 568 (unsigned int)rh->r.opcode, 569 (unsigned int)rh->r.subcode); 570 droq->stats.dropped_nodispatch++; 571 } 572 573 return cnt; 574 } 575 576 static inline void octeon_droq_drop_packets(struct octeon_device *oct, 577 struct octeon_droq *droq, 578 u32 cnt) 579 { 580 u32 i = 0, buf_cnt; 581 struct octeon_droq_info *info; 582 583 for (i = 0; i < cnt; i++) { 584 info = &droq->info_list[droq->read_idx]; 585 octeon_swap_8B_data((u64 *)info, 2); 586 587 if (info->length) { 588 info->length -= OCT_RH_SIZE; 589 droq->stats.bytes_received += info->length; 590 buf_cnt = octeon_droq_get_bufcount(droq->buffer_size, 591 (u32)info->length); 592 } else { 593 dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n"); 594 buf_cnt = 1; 595 } 596 597 droq->read_idx = incr_index(droq->read_idx, buf_cnt, 598 droq->max_count); 599 droq->refill_count += buf_cnt; 600 } 601 } 602 603 static u32 604 octeon_droq_fast_process_packets(struct octeon_device *oct, 605 struct octeon_droq *droq, 606 u32 pkts_to_process) 607 { 608 struct octeon_droq_info *info; 609 union octeon_rh *rh; 610 u32 pkt, total_len = 0, pkt_count; 611 612 pkt_count = pkts_to_process; 613 614 for (pkt = 0; pkt < pkt_count; pkt++) { 615 u32 pkt_len = 0; 616 struct sk_buff *nicbuf = NULL; 617 struct octeon_skb_page_info *pg_info; 618 void *buf; 619 620 info = &droq->info_list[droq->read_idx]; 621 octeon_swap_8B_data((u64 *)info, 2); 622 623 if (!info->length) { 624 dev_err(&oct->pci_dev->dev, 625 "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n", 626 droq->q_no, droq->read_idx, pkt_count); 627 print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS, 628 (u8 *)info, 629 OCT_DROQ_INFO_SIZE); 630 break; 631 } 632 633 /* Len of resp hdr in included in the received data len. */ 634 info->length -= OCT_RH_SIZE; 635 rh = &info->rh; 636 637 total_len += (u32)info->length; 638 if (opcode_slow_path(rh)) { 639 u32 buf_cnt; 640 641 buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info); 642 droq->read_idx = incr_index(droq->read_idx, 643 buf_cnt, droq->max_count); 644 droq->refill_count += buf_cnt; 645 } else { 646 if (info->length <= droq->buffer_size) { 647 pkt_len = (u32)info->length; 648 nicbuf = droq->recv_buf_list[ 649 droq->read_idx].buffer; 650 pg_info = &droq->recv_buf_list[ 651 droq->read_idx].pg_info; 652 if (recv_buffer_recycle(oct, pg_info)) 653 pg_info->page = NULL; 654 droq->recv_buf_list[droq->read_idx].buffer = 655 NULL; 656 657 droq->read_idx = incr_index(droq->read_idx, 1, 658 droq->max_count); 659 droq->refill_count++; 660 } else { 661 nicbuf = octeon_fast_packet_alloc((u32) 662 info->length); 663 pkt_len = 0; 664 /* nicbuf allocation can fail. We'll handle it 665 * inside the loop. 666 */ 667 while (pkt_len < info->length) { 668 int cpy_len, idx = droq->read_idx; 669 670 cpy_len = ((pkt_len + droq->buffer_size) 671 > info->length) ? 672 ((u32)info->length - pkt_len) : 673 droq->buffer_size; 674 675 if (nicbuf) { 676 octeon_fast_packet_next(droq, 677 nicbuf, 678 cpy_len, 679 idx); 680 buf = droq->recv_buf_list[idx]. 681 buffer; 682 recv_buffer_fast_free(buf); 683 droq->recv_buf_list[idx].buffer 684 = NULL; 685 } else { 686 droq->stats.rx_alloc_failure++; 687 } 688 689 pkt_len += cpy_len; 690 droq->read_idx = 691 incr_index(droq->read_idx, 1, 692 droq->max_count); 693 droq->refill_count++; 694 } 695 } 696 697 if (nicbuf) { 698 if (droq->ops.fptr) { 699 droq->ops.fptr(oct->octeon_id, 700 nicbuf, pkt_len, 701 rh, &droq->napi, 702 droq->ops.farg); 703 } else { 704 recv_buffer_free(nicbuf); 705 } 706 } 707 } 708 709 if (droq->refill_count >= droq->refill_threshold) { 710 int desc_refilled = octeon_droq_refill(oct, droq); 711 712 /* Flush the droq descriptor data to memory to be sure 713 * that when we update the credits the data in memory 714 * is accurate. 715 */ 716 wmb(); 717 writel((desc_refilled), droq->pkts_credit_reg); 718 /* make sure mmio write completes */ 719 mmiowb(); 720 } 721 722 } /* for (each packet)... */ 723 724 /* Increment refill_count by the number of buffers processed. */ 725 droq->stats.pkts_received += pkt; 726 droq->stats.bytes_received += total_len; 727 728 if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { 729 octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); 730 731 droq->stats.dropped_toomany += (pkts_to_process - pkt); 732 return pkts_to_process; 733 } 734 735 return pkt; 736 } 737 738 int 739 octeon_droq_process_packets(struct octeon_device *oct, 740 struct octeon_droq *droq, 741 u32 budget) 742 { 743 u32 pkt_count = 0, pkts_processed = 0; 744 struct list_head *tmp, *tmp2; 745 746 /* Grab the droq lock */ 747 spin_lock(&droq->lock); 748 749 octeon_droq_check_hw_for_pkts(droq); 750 pkt_count = atomic_read(&droq->pkts_pending); 751 752 if (!pkt_count) { 753 spin_unlock(&droq->lock); 754 return 0; 755 } 756 757 if (pkt_count > budget) 758 pkt_count = budget; 759 760 pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); 761 762 atomic_sub(pkts_processed, &droq->pkts_pending); 763 764 /* Release the spin lock */ 765 spin_unlock(&droq->lock); 766 767 list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { 768 struct __dispatch *rdisp = (struct __dispatch *)tmp; 769 770 list_del(tmp); 771 rdisp->disp_fn(rdisp->rinfo, 772 octeon_get_dispatch_arg 773 (oct, 774 (u16)rdisp->rinfo->recv_pkt->rh.r.opcode, 775 (u16)rdisp->rinfo->recv_pkt->rh.r.subcode)); 776 } 777 778 /* If there are packets pending. schedule tasklet again */ 779 if (atomic_read(&droq->pkts_pending)) 780 return 1; 781 782 return 0; 783 } 784 785 /** 786 * Utility function to poll for packets. check_hw_for_packets must be 787 * called before calling this routine. 788 */ 789 790 static int 791 octeon_droq_process_poll_pkts(struct octeon_device *oct, 792 struct octeon_droq *droq, u32 budget) 793 { 794 struct list_head *tmp, *tmp2; 795 u32 pkts_available = 0, pkts_processed = 0; 796 u32 total_pkts_processed = 0; 797 798 if (budget > droq->max_count) 799 budget = droq->max_count; 800 801 spin_lock(&droq->lock); 802 803 while (total_pkts_processed < budget) { 804 octeon_droq_check_hw_for_pkts(droq); 805 806 pkts_available = min((budget - total_pkts_processed), 807 (u32)(atomic_read(&droq->pkts_pending))); 808 809 if (pkts_available == 0) 810 break; 811 812 pkts_processed = 813 octeon_droq_fast_process_packets(oct, droq, 814 pkts_available); 815 816 atomic_sub(pkts_processed, &droq->pkts_pending); 817 818 total_pkts_processed += pkts_processed; 819 } 820 821 spin_unlock(&droq->lock); 822 823 list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { 824 struct __dispatch *rdisp = (struct __dispatch *)tmp; 825 826 list_del(tmp); 827 rdisp->disp_fn(rdisp->rinfo, 828 octeon_get_dispatch_arg 829 (oct, 830 (u16)rdisp->rinfo->recv_pkt->rh.r.opcode, 831 (u16)rdisp->rinfo->recv_pkt->rh.r.subcode)); 832 } 833 834 return total_pkts_processed; 835 } 836 837 int 838 octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, 839 u32 arg) 840 { 841 struct octeon_droq *droq; 842 843 droq = oct->droq[q_no]; 844 845 if (cmd == POLL_EVENT_PROCESS_PKTS) 846 return octeon_droq_process_poll_pkts(oct, droq, arg); 847 848 if (cmd == POLL_EVENT_PENDING_PKTS) { 849 u32 pkt_cnt = atomic_read(&droq->pkts_pending); 850 851 return octeon_droq_process_packets(oct, droq, pkt_cnt); 852 } 853 854 if (cmd == POLL_EVENT_ENABLE_INTR) { 855 u32 value; 856 unsigned long flags; 857 858 /* Enable Pkt Interrupt */ 859 switch (oct->chip_id) { 860 case OCTEON_CN66XX: 861 case OCTEON_CN68XX: { 862 struct octeon_cn6xxx *cn6xxx = 863 (struct octeon_cn6xxx *)oct->chip; 864 spin_lock_irqsave 865 (&cn6xxx->lock_for_droq_int_enb_reg, flags); 866 value = 867 octeon_read_csr(oct, 868 CN6XXX_SLI_PKT_TIME_INT_ENB); 869 value |= (1 << q_no); 870 octeon_write_csr(oct, 871 CN6XXX_SLI_PKT_TIME_INT_ENB, 872 value); 873 value = 874 octeon_read_csr(oct, 875 CN6XXX_SLI_PKT_CNT_INT_ENB); 876 value |= (1 << q_no); 877 octeon_write_csr(oct, 878 CN6XXX_SLI_PKT_CNT_INT_ENB, 879 value); 880 881 /* don't bother flushing the enables */ 882 883 spin_unlock_irqrestore 884 (&cn6xxx->lock_for_droq_int_enb_reg, flags); 885 return 0; 886 } 887 break; 888 case OCTEON_CN23XX_PF_VID: { 889 lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); 890 } 891 break; 892 893 case OCTEON_CN23XX_VF_VID: 894 lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); 895 break; 896 } 897 return 0; 898 } 899 900 dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd); 901 return -EINVAL; 902 } 903 904 int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, 905 struct octeon_droq_ops *ops) 906 { 907 struct octeon_droq *droq; 908 unsigned long flags; 909 struct octeon_config *oct_cfg = NULL; 910 911 oct_cfg = octeon_get_conf(oct); 912 913 if (!oct_cfg) 914 return -EINVAL; 915 916 if (!(ops)) { 917 dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n", 918 __func__); 919 return -EINVAL; 920 } 921 922 if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { 923 dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", 924 __func__, q_no, (oct->num_oqs - 1)); 925 return -EINVAL; 926 } 927 928 droq = oct->droq[q_no]; 929 930 spin_lock_irqsave(&droq->lock, flags); 931 932 memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops)); 933 934 spin_unlock_irqrestore(&droq->lock, flags); 935 936 return 0; 937 } 938 939 int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) 940 { 941 unsigned long flags; 942 struct octeon_droq *droq; 943 struct octeon_config *oct_cfg = NULL; 944 945 oct_cfg = octeon_get_conf(oct); 946 947 if (!oct_cfg) 948 return -EINVAL; 949 950 if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { 951 dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", 952 __func__, q_no, oct->num_oqs - 1); 953 return -EINVAL; 954 } 955 956 droq = oct->droq[q_no]; 957 958 if (!droq) { 959 dev_info(&oct->pci_dev->dev, 960 "Droq id (%d) not available.\n", q_no); 961 return 0; 962 } 963 964 spin_lock_irqsave(&droq->lock, flags); 965 966 droq->ops.fptr = NULL; 967 droq->ops.farg = NULL; 968 droq->ops.drop_on_max = 0; 969 970 spin_unlock_irqrestore(&droq->lock, flags); 971 972 return 0; 973 } 974 975 int octeon_create_droq(struct octeon_device *oct, 976 u32 q_no, u32 num_descs, 977 u32 desc_size, void *app_ctx) 978 { 979 struct octeon_droq *droq; 980 int numa_node = dev_to_node(&oct->pci_dev->dev); 981 982 if (oct->droq[q_no]) { 983 dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n", 984 q_no); 985 return 1; 986 } 987 988 /* Allocate the DS for the new droq. */ 989 droq = vmalloc_node(sizeof(*droq), numa_node); 990 if (!droq) 991 droq = vmalloc(sizeof(*droq)); 992 if (!droq) 993 return -1; 994 995 memset(droq, 0, sizeof(struct octeon_droq)); 996 997 /*Disable the pkt o/p for this Q */ 998 octeon_set_droq_pkt_op(oct, q_no, 0); 999 oct->droq[q_no] = droq; 1000 1001 /* Initialize the Droq */ 1002 if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) { 1003 vfree(oct->droq[q_no]); 1004 oct->droq[q_no] = NULL; 1005 return -1; 1006 } 1007 1008 oct->num_oqs++; 1009 1010 dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__, 1011 oct->num_oqs); 1012 1013 /* Global Droq register settings */ 1014 1015 /* As of now not required, as setting are done for all 32 Droqs at 1016 * the same time. 1017 */ 1018 return 0; 1019 } 1020