/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))

struct niclist {
        struct list_head list;
        void *ptr;
};

struct __dispatch {
        struct list_head list;
        struct octeon_recv_info *rinfo;
        octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 *  function for a given opcode/subcode.
 *  @param octeon_dev - the octeon device pointer.
 *  @param opcode     - the opcode for which the dispatch argument
 *                      is to be checked.
 *  @param subcode    - the subcode for which the dispatch argument
 *                      is to be checked.
 *  @return Success: void * (argument to the dispatch function)
 *  @return Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
                                            u16 opcode, u16 subcode)
{
        int idx;
        struct list_head *dispatch;
        void *fn_arg = NULL;
        u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

        idx = combined_opcode & OCTEON_OPCODE_MASK;

        spin_lock_bh(&octeon_dev->dispatch.lock);

        if (octeon_dev->dispatch.count == 0) {
                spin_unlock_bh(&octeon_dev->dispatch.lock);
                return NULL;
        }

        if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
                fn_arg = octeon_dev->dispatch.dlist[idx].arg;
        } else {
                list_for_each(dispatch,
                              &octeon_dev->dispatch.dlist[idx].list) {
                        if (((struct octeon_dispatch *)dispatch)->opcode ==
                            combined_opcode) {
                                fn_arg = ((struct octeon_dispatch *)
                                          dispatch)->arg;
                                break;
                        }
                }
        }

        spin_unlock_bh(&octeon_dev->dispatch.lock);
        return fn_arg;
}

/** Check for packets on Droq. This function should be called with lock held.
 *  @param droq - Droq on which count is checked.
 *  @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
        u32 pkt_count = 0;
        u32 last_count;

        pkt_count = readl(droq->pkts_sent_reg);

        last_count = pkt_count - droq->pkt_count;
        droq->pkt_count = pkt_count;

        /* we shall write to cnts at napi irq enable or end of droq tasklet */
        if (last_count)
                atomic_add(last_count, &droq->pkts_pending);

        return last_count;
}
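/** Compute droq->max_empty_descs: the maximum number of descriptors that
 *  may be left without buffers while the queue can still safely receive
 *  one maximum-sized (64K) packet from Octeon.
 *  @param droq - droq for which the limit is computed.
 */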
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
        u32 count = 0;

        /* max_empty_descs is the max. no. of descs that can have no buffers.
         * If the empty desc count goes beyond this value, we cannot safely
         * read in a 64K packet sent by Octeon
         * (64K is max pkt size from Octeon)
         */
        droq->max_empty_descs = 0;

        do {
                droq->max_empty_descs++;
                count += droq->buffer_size;
        } while (count < (64 * 1024));

        droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}

static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
        droq->read_idx = 0;
        droq->write_idx = 0;
        droq->refill_idx = 0;
        droq->refill_count = 0;
        atomic_set(&droq->pkts_pending, 0);
}

static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
                                 struct octeon_droq *droq)
{
        u32 i;
        struct octeon_skb_page_info *pg_info;

        for (i = 0; i < droq->max_count; i++) {
                pg_info = &droq->recv_buf_list[i].pg_info;

                if (pg_info->dma)
                        lio_unmap_ring(oct->pci_dev, (u64)pg_info->dma);
                pg_info->dma = 0;

                if (pg_info->page)
                        recv_buffer_destroy(droq->recv_buf_list[i].buffer,
                                            pg_info);

                if (droq->desc_ring && droq->desc_ring[i].info_ptr)
                        lio_unmap_ring_info(oct->pci_dev,
                                            (u64)droq->desc_ring[i].info_ptr,
                                            OCT_DROQ_INFO_SIZE);
                droq->recv_buf_list[i].buffer = NULL;
        }

        octeon_droq_reset_indices(droq);
}

static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
                               struct octeon_droq *droq)
{
        u32 i;
        void *buf;
        struct octeon_droq_desc *desc_ring = droq->desc_ring;

        for (i = 0; i < droq->max_count; i++) {
                buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

                if (!buf) {
                        dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
                                __func__);
                        droq->stats.rx_alloc_failure++;
                        return -ENOMEM;
                }

                droq->recv_buf_list[i].buffer = buf;
                droq->recv_buf_list[i].data = get_rbd(buf);
                droq->info_list[i].length = 0;

                /* map ring buffers into memory */
                desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
                desc_ring[i].buffer_ptr =
                        lio_map_ring(droq->recv_buf_list[i].buffer);
        }

        octeon_droq_reset_indices(droq);

        octeon_droq_compute_max_packet_bufs(droq);

        return 0;
}

int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
        struct octeon_droq *droq = oct->droq[q_no];

        dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

        octeon_droq_destroy_ring_buffers(oct, droq);
        vfree(droq->recv_buf_list);

        if (droq->info_base_addr)
                cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
                                       droq->info_alloc_size,
                                       droq->info_base_addr,
                                       droq->info_list_dma);

        if (droq->desc_ring)
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
                             droq->desc_ring, droq->desc_ring_dma);

        memset(droq, 0, OCT_DROQ_SIZE);

        return 0;
}
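/** Initialize a droq that has already been allocated: sets up the
 *  descriptor ring, the info list and the receive buffer list on the
 *  queue's NUMA node where possible, fills the ring with receive buffers,
 *  and programs the output queue registers for this queue.
 *  @param oct       - the octeon device pointer.
 *  @param q_no      - droq number.
 *  @param num_descs - number of descriptors in the ring.
 *  @param desc_size - size of the buffer attached to each descriptor.
 *  @param app_ctx   - application context; if NULL, q_no is stored instead.
 *  @return Success: 0. Failure: 1.
 */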
int octeon_init_droq(struct octeon_device *oct,
                     u32 q_no,
                     u32 num_descs,
                     u32 desc_size,
                     void *app_ctx)
{
        struct octeon_droq *droq;
        u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
        u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
        int orig_node = dev_to_node(&oct->pci_dev->dev);
        int numa_node = cpu_to_node(q_no % num_online_cpus());

        dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

        droq = oct->droq[q_no];
        memset(droq, 0, OCT_DROQ_SIZE);

        droq->oct_dev = oct;
        droq->q_no = q_no;
        if (app_ctx)
                droq->app_ctx = app_ctx;
        else
                droq->app_ctx = (void *)(size_t)q_no;

        c_num_descs = num_descs;
        c_buf_size = desc_size;
        if (OCTEON_CN6XXX(oct)) {
                struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

                c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
                c_refill_threshold =
                        (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
        } else if (OCTEON_CN23XX_PF(oct)) {
                struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);

                c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
                c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
        } else {
                return 1;
        }

        droq->max_count = c_num_descs;
        droq->buffer_size = c_buf_size;

        desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
        set_dev_node(&oct->pci_dev->dev, numa_node);
        droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
                                        (dma_addr_t *)&droq->desc_ring_dma);
        set_dev_node(&oct->pci_dev->dev, orig_node);
        if (!droq->desc_ring)
                droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
                                        (dma_addr_t *)&droq->desc_ring_dma);

        if (!droq->desc_ring) {
                dev_err(&oct->pci_dev->dev,
                        "Output queue %d ring alloc failed\n", q_no);
                return 1;
        }

        dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
                q_no, droq->desc_ring, droq->desc_ring_dma);
        dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
                droq->max_count);

        droq->info_list =
                cnnic_numa_alloc_aligned_dma((droq->max_count *
                                              OCT_DROQ_INFO_SIZE),
                                             &droq->info_alloc_size,
                                             &droq->info_base_addr,
                                             numa_node);
        if (!droq->info_list) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
                             droq->desc_ring, droq->desc_ring_dma);
                return 1;
        }

        droq->recv_buf_list = (struct octeon_recv_buffer *)
                              vmalloc_node(droq->max_count *
                                           OCT_DROQ_RECVBUF_SIZE,
                                           numa_node);
        if (!droq->recv_buf_list)
                droq->recv_buf_list = (struct octeon_recv_buffer *)
                                      vmalloc(droq->max_count *
                                              OCT_DROQ_RECVBUF_SIZE);
        if (!droq->recv_buf_list) {
                dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
                goto init_droq_fail;
        }

        if (octeon_droq_setup_ring_buffers(oct, droq))
                goto init_droq_fail;

        droq->pkts_per_intr = c_pkts_per_intr;
        droq->refill_threshold = c_refill_threshold;

        dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
                droq->max_empty_descs);

        spin_lock_init(&droq->lock);

        INIT_LIST_HEAD(&droq->dispatch_list);

        /* For 56xx Pass1, this function won't be called, so no checks. */
        oct->fn_list.setup_oq_regs(oct, q_no);

        oct->io_qmask.oq |= BIT_ULL(q_no);

        return 0;

init_droq_fail:
        octeon_delete_droq(oct, q_no);
        return 1;
}
/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
                struct octeon_device *octeon_dev,
                struct octeon_droq *droq,
                u32 buf_cnt,
                u32 idx)
{
        struct octeon_droq_info *info;
        struct octeon_recv_pkt *recv_pkt;
        struct octeon_recv_info *recv_info;
        u32 i, bytes_left;
        struct octeon_skb_page_info *pg_info;

        info = &droq->info_list[idx];

        recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
        if (!recv_info)
                return NULL;

        recv_pkt = recv_info->recv_pkt;
        recv_pkt->rh = info->rh;
        recv_pkt->length = (u32)info->length;
        recv_pkt->buffer_count = (u16)buf_cnt;
        recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

        i = 0;
        bytes_left = (u32)info->length;

        while (buf_cnt) {
                pg_info = &droq->recv_buf_list[idx].pg_info;

                lio_unmap_ring(octeon_dev->pci_dev, (u64)pg_info->dma);
                pg_info->page = NULL;
                pg_info->dma = 0;

                recv_pkt->buffer_size[i] =
                        (bytes_left >= droq->buffer_size) ?
                        droq->buffer_size : bytes_left;

                recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
                droq->recv_buf_list[idx].buffer = NULL;

                INCR_INDEX_BY1(idx, droq->max_count);
                bytes_left -= droq->buffer_size;
                i++;
                buf_cnt--;
        }

        return recv_info;
}

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
                                struct octeon_droq_desc *desc_ring)
{
        u32 desc_refilled = 0;
        u32 refill_index = droq->refill_idx;

        while (refill_index != droq->read_idx) {
                if (droq->recv_buf_list[refill_index].buffer) {
                        droq->recv_buf_list[droq->refill_idx].buffer =
                                droq->recv_buf_list[refill_index].buffer;
                        droq->recv_buf_list[droq->refill_idx].data =
                                droq->recv_buf_list[refill_index].data;
                        desc_ring[droq->refill_idx].buffer_ptr =
                                desc_ring[refill_index].buffer_ptr;
                        droq->recv_buf_list[refill_index].buffer = NULL;
                        desc_ring[refill_index].buffer_ptr = 0;
                        do {
                                INCR_INDEX_BY1(droq->refill_idx,
                                               droq->max_count);
                                desc_refilled++;
                                droq->refill_count--;
                        } while (droq->recv_buf_list[droq->refill_idx].buffer);
                }
                INCR_INDEX_BY1(refill_index, droq->max_count);
        }                       /* while */
        return desc_refilled;
}
/* octeon_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
        struct octeon_droq_desc *desc_ring;
        void *buf = NULL;
        u8 *data;
        u32 desc_refilled = 0;
        struct octeon_skb_page_info *pg_info;

        desc_ring = droq->desc_ring;

        while (droq->refill_count && (desc_refilled < droq->max_count)) {
                /* If a valid buffer exists (happens if there is no dispatch),
                 * reuse the buffer, else allocate.
                 */
                if (!droq->recv_buf_list[droq->refill_idx].buffer) {
                        pg_info =
                                &droq->recv_buf_list[droq->refill_idx].pg_info;
                        /* Either recycle the existing pages or go for
                         * new page alloc
                         */
                        if (pg_info->page)
                                buf = recv_buffer_reuse(octeon_dev, pg_info);
                        else
                                buf = recv_buffer_alloc(octeon_dev, pg_info);
                        /* If a buffer could not be allocated, no point in
                         * continuing
                         */
                        if (!buf) {
                                droq->stats.rx_alloc_failure++;
                                break;
                        }
                        droq->recv_buf_list[droq->refill_idx].buffer = buf;
                        data = get_rbd(buf);
                } else {
                        data = get_rbd(droq->recv_buf_list
                                       [droq->refill_idx].buffer);
                }

                droq->recv_buf_list[droq->refill_idx].data = data;

                desc_ring[droq->refill_idx].buffer_ptr =
                        lio_map_ring(droq->recv_buf_list[droq->
                                     refill_idx].buffer);
                /* Reset any previous values in the length field. */
                droq->info_list[droq->refill_idx].length = 0;

                INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
                desc_refilled++;
                droq->refill_count--;
        }

        if (droq->refill_count)
                desc_refilled +=
                        octeon_droq_refill_pullup_descs(droq, desc_ring);

        /* If droq->refill_count is still non-zero here, it would not change
         * in pass two: we only moved buffers to close the gap in the ring,
         * so the same number of buffers remains to be refilled.
         */
        return desc_refilled;
}
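/** Compute the number of receive buffers needed to hold a packet, i.e.
 *  total_len divided by buf_size, rounded up. For example, a 3000-byte
 *  packet with 2048-byte buffers occupies two buffers.
 *  @param buf_size  - size of one receive buffer.
 *  @param total_len - total length of the packet.
 *  @return Number of buffers the packet spans.
 */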
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
        u32 buf_cnt = 0;

        while (total_len > (buf_size * buf_cnt))
                buf_cnt++;
        return buf_cnt;
}

static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
                         struct octeon_droq *droq,
                         union octeon_rh *rh,
                         struct octeon_droq_info *info)
{
        u32 cnt;
        octeon_dispatch_fn_t disp_fn;
        struct octeon_recv_info *rinfo;

        cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

        disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
                                      (u16)rh->r.subcode);
        if (disp_fn) {
                rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
                if (rinfo) {
                        struct __dispatch *rdisp = rinfo->rsvd;

                        rdisp->rinfo = rinfo;
                        rdisp->disp_fn = disp_fn;
                        rinfo->recv_pkt->rh = *rh;
                        list_add_tail(&rdisp->list,
                                      &droq->dispatch_list);
                } else {
                        droq->stats.dropped_nomem++;
                }
        } else {
                dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
                        (unsigned int)rh->r.opcode,
                        (unsigned int)rh->r.subcode);
                droq->stats.dropped_nodispatch++;
        }

        return cnt;
}

static inline void octeon_droq_drop_packets(struct octeon_device *oct,
                                            struct octeon_droq *droq,
                                            u32 cnt)
{
        u32 i = 0, buf_cnt;
        struct octeon_droq_info *info;

        for (i = 0; i < cnt; i++) {
                info = &droq->info_list[droq->read_idx];
                octeon_swap_8B_data((u64 *)info, 2);

                if (info->length) {
                        info->length -= OCT_RH_SIZE;
                        droq->stats.bytes_received += info->length;
                        buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
                                                           (u32)info->length);
                } else {
                        dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
                        buf_cnt = 1;
                }

                INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
                droq->refill_count += buf_cnt;
        }
}
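/** Fast-path packet processing: handles up to pkts_to_process packets
 *  from the droq. Slow-path packets are queued for dispatch through
 *  octeon_droq_dispatch_pkt(); fast-path packets are handed to the
 *  registered droq->ops.fptr callback (or freed if none is registered).
 *  Refills descriptors and posts credits whenever refill_count reaches
 *  the refill threshold. Called with droq->lock held.
 *  @return Number of packets processed (all of pkts_to_process if
 *          trailing packets had to be dropped).
 */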
static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
                                 struct octeon_droq *droq,
                                 u32 pkts_to_process)
{
        struct octeon_droq_info *info;
        union octeon_rh *rh;
        u32 pkt, total_len = 0, pkt_count;

        pkt_count = pkts_to_process;

        for (pkt = 0; pkt < pkt_count; pkt++) {
                u32 pkt_len = 0;
                struct sk_buff *nicbuf = NULL;
                struct octeon_skb_page_info *pg_info;
                void *buf;

                info = &droq->info_list[droq->read_idx];
                octeon_swap_8B_data((u64 *)info, 2);

                if (!info->length) {
                        dev_err(&oct->pci_dev->dev,
                                "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
                                droq->q_no, droq->read_idx, pkt_count);
                        print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
                                             (u8 *)info,
                                             OCT_DROQ_INFO_SIZE);
                        break;
                }

                /* Len of resp hdr is included in the received data len. */
                info->length -= OCT_RH_SIZE;
                rh = &info->rh;

                total_len += (u32)info->length;
                if (OPCODE_SLOW_PATH(rh)) {
                        u32 buf_cnt;

                        buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
                        INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
                        droq->refill_count += buf_cnt;
                } else {
                        if (info->length <= droq->buffer_size) {
                                pkt_len = (u32)info->length;
                                nicbuf = droq->recv_buf_list[
                                        droq->read_idx].buffer;
                                pg_info = &droq->recv_buf_list[
                                        droq->read_idx].pg_info;
                                if (recv_buffer_recycle(oct, pg_info))
                                        pg_info->page = NULL;
                                droq->recv_buf_list[droq->read_idx].buffer =
                                        NULL;

                                INCR_INDEX_BY1(droq->read_idx, droq->max_count);
                                droq->refill_count++;
                        } else {
                                nicbuf = octeon_fast_packet_alloc((u32)
                                                                  info->length);
                                pkt_len = 0;
                                /* nicbuf allocation can fail. We'll handle it
                                 * inside the loop.
                                 */
                                while (pkt_len < info->length) {
                                        int cpy_len, idx = droq->read_idx;

                                        cpy_len = ((pkt_len + droq->buffer_size)
                                                   > info->length) ?
                                                ((u32)info->length - pkt_len) :
                                                droq->buffer_size;

                                        if (nicbuf) {
                                                octeon_fast_packet_next(droq,
                                                                        nicbuf,
                                                                        cpy_len,
                                                                        idx);
                                                buf = droq->recv_buf_list[idx].
                                                        buffer;
                                                recv_buffer_fast_free(buf);
                                                droq->recv_buf_list[idx].buffer
                                                        = NULL;
                                        } else {
                                                droq->stats.rx_alloc_failure++;
                                        }

                                        pkt_len += cpy_len;
                                        INCR_INDEX_BY1(droq->read_idx,
                                                       droq->max_count);
                                        droq->refill_count++;
                                }
                        }

                        if (nicbuf) {
                                if (droq->ops.fptr) {
                                        droq->ops.fptr(oct->octeon_id,
                                                       nicbuf, pkt_len,
                                                       rh, &droq->napi,
                                                       droq->ops.farg);
                                } else {
                                        recv_buffer_free(nicbuf);
                                }
                        }
                }

                if (droq->refill_count >= droq->refill_threshold) {
                        int desc_refilled = octeon_droq_refill(oct, droq);

                        /* Flush the droq descriptor data to memory to be sure
                         * that when we update the credits the data in memory
                         * is accurate.
                         */
                        wmb();
                        writel(desc_refilled, droq->pkts_credit_reg);
                        /* make sure mmio write completes */
                        mmiowb();
                }
        }                       /* for (each packet)... */

        /* Update stats with the number of packets and bytes processed. */
        droq->stats.pkts_received += pkt;
        droq->stats.bytes_received += total_len;

        if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
                octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

                droq->stats.dropped_toomany += (pkts_to_process - pkt);
                return pkts_to_process;
        }

        return pkt;
}
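/** Process packets on a droq from interrupt/tasklet context, bounded by
 *  budget. Takes droq->lock, processes pending packets, then invokes any
 *  slow-path dispatch functions that were queued during processing.
 *  @return 1 if packets are still pending (the caller should reschedule),
 *          0 otherwise.
 */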
int
octeon_droq_process_packets(struct octeon_device *oct,
                            struct octeon_droq *droq,
                            u32 budget)
{
        u32 pkt_count = 0, pkts_processed = 0;
        struct list_head *tmp, *tmp2;

        /* Grab the droq lock */
        spin_lock(&droq->lock);

        octeon_droq_check_hw_for_pkts(droq);
        pkt_count = atomic_read(&droq->pkts_pending);

        if (!pkt_count) {
                spin_unlock(&droq->lock);
                return 0;
        }

        if (pkt_count > budget)
                pkt_count = budget;

        pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

        atomic_sub(pkts_processed, &droq->pkts_pending);

        /* Release the spin lock */
        spin_unlock(&droq->lock);

        list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
                struct __dispatch *rdisp = (struct __dispatch *)tmp;

                list_del(tmp);
                rdisp->disp_fn(rdisp->rinfo,
                               octeon_get_dispatch_arg
                               (oct,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
        }

        /* If there are packets pending, schedule the tasklet again */
        if (atomic_read(&droq->pkts_pending))
                return 1;

        return 0;
}

/**
 * Utility function to poll for packets. octeon_droq_check_hw_for_pkts()
 * must be called before calling this routine.
 */
static int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
                              struct octeon_droq *droq, u32 budget)
{
        struct list_head *tmp, *tmp2;
        u32 pkts_available = 0, pkts_processed = 0;
        u32 total_pkts_processed = 0;

        if (budget > droq->max_count)
                budget = droq->max_count;

        spin_lock(&droq->lock);

        while (total_pkts_processed < budget) {
                octeon_droq_check_hw_for_pkts(droq);

                pkts_available =
                        CVM_MIN((budget - total_pkts_processed),
                                (u32)(atomic_read(&droq->pkts_pending)));

                if (pkts_available == 0)
                        break;

                pkts_processed =
                        octeon_droq_fast_process_packets(oct, droq,
                                                         pkts_available);

                atomic_sub(pkts_processed, &droq->pkts_pending);

                total_pkts_processed += pkts_processed;
        }

        spin_unlock(&droq->lock);

        list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
                struct __dispatch *rdisp = (struct __dispatch *)tmp;

                list_del(tmp);
                rdisp->disp_fn(rdisp->rinfo,
                               octeon_get_dispatch_arg
                               (oct,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
                                (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
        }

        return total_pkts_processed;
}
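/** Poll-mode entry point for droq commands:
 *  POLL_EVENT_PROCESS_PKTS - process up to arg packets in poll mode.
 *  POLL_EVENT_PENDING_PKTS - process all packets currently pending.
 *  POLL_EVENT_ENABLE_INTR  - re-enable packet interrupts for the queue.
 *  @return Packets processed or 0 on success, -EINVAL for an unknown
 *          command.
 */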
int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
                             u32 arg)
{
        struct octeon_droq *droq;

        droq = oct->droq[q_no];

        if (cmd == POLL_EVENT_PROCESS_PKTS)
                return octeon_droq_process_poll_pkts(oct, droq, arg);

        if (cmd == POLL_EVENT_PENDING_PKTS) {
                u32 pkt_cnt = atomic_read(&droq->pkts_pending);

                return octeon_droq_process_packets(oct, droq, pkt_cnt);
        }

        if (cmd == POLL_EVENT_ENABLE_INTR) {
                u32 value;
                unsigned long flags;

                /* Enable Pkt Interrupt */
                switch (oct->chip_id) {
                case OCTEON_CN66XX:
                case OCTEON_CN68XX: {
                        struct octeon_cn6xxx *cn6xxx =
                                (struct octeon_cn6xxx *)oct->chip;
                        spin_lock_irqsave
                                (&cn6xxx->lock_for_droq_int_enb_reg, flags);
                        value =
                                octeon_read_csr(oct,
                                                CN6XXX_SLI_PKT_TIME_INT_ENB);
                        value |= (1 << q_no);
                        octeon_write_csr(oct,
                                         CN6XXX_SLI_PKT_TIME_INT_ENB,
                                         value);
                        value =
                                octeon_read_csr(oct,
                                                CN6XXX_SLI_PKT_CNT_INT_ENB);
                        value |= (1 << q_no);
                        octeon_write_csr(oct,
                                         CN6XXX_SLI_PKT_CNT_INT_ENB,
                                         value);

                        /* don't bother flushing the enables */

                        spin_unlock_irqrestore
                                (&cn6xxx->lock_for_droq_int_enb_reg, flags);
                        return 0;
                }
                break;
                case OCTEON_CN23XX_PF_VID: {
                        lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
                }
                break;
                }
                return 0;
        }

        dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
        return -EINVAL;
}

int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
                             struct octeon_droq_ops *ops)
{
        struct octeon_droq *droq;
        unsigned long flags;
        struct octeon_config *oct_cfg = NULL;

        oct_cfg = octeon_get_conf(oct);

        if (!oct_cfg)
                return -EINVAL;

        if (!ops) {
                dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
                        __func__);
                return -EINVAL;
        }

        if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
                dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
                        __func__, q_no, (CFG_GET_OQ_MAX_Q(oct_cfg) - 1));
                return -EINVAL;
        }

        droq = oct->droq[q_no];

        spin_lock_irqsave(&droq->lock, flags);

        memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

        spin_unlock_irqrestore(&droq->lock, flags);

        return 0;
}

int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
        unsigned long flags;
        struct octeon_droq *droq;
        struct octeon_config *oct_cfg = NULL;

        oct_cfg = octeon_get_conf(oct);

        if (!oct_cfg)
                return -EINVAL;

        if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
                dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
                        __func__, q_no, CFG_GET_OQ_MAX_Q(oct_cfg) - 1);
                return -EINVAL;
        }

        droq = oct->droq[q_no];

        if (!droq) {
                dev_info(&oct->pci_dev->dev,
                         "Droq id (%d) not available.\n", q_no);
                return 0;
        }

        spin_lock_irqsave(&droq->lock, flags);

        droq->ops.fptr = NULL;
        droq->ops.farg = NULL;
        droq->ops.drop_on_max = 0;

        spin_unlock_irqrestore(&droq->lock, flags);

        return 0;
}
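/** Allocate a droq structure for queue q_no (on the queue's NUMA node
 *  where possible), mark it in use, and initialize it via
 *  octeon_init_droq(). An illustrative call sequence from a consumer,
 *  where my_rx_callback and my_ctx are hypothetical names rather than
 *  part of this file:
 *
 *      struct octeon_droq_ops ops = {
 *              .fptr = my_rx_callback,   // receives each fast-path pkt
 *              .farg = my_ctx,
 *              .drop_on_max = 1,         // drop pkts beyond the budget
 *      };
 *
 *      octeon_create_droq(oct, q_no, num_descs, desc_size, NULL);
 *      octeon_register_droq_ops(oct, q_no, &ops);
 *
 *  @return Success: 0. Failure: 1 if the droq already exists, -1 if
 *          allocation or initialization fails.
 */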
int octeon_create_droq(struct octeon_device *oct,
                       u32 q_no, u32 num_descs,
                       u32 desc_size, void *app_ctx)
{
        struct octeon_droq *droq;
        int numa_node = cpu_to_node(q_no % num_online_cpus());

        if (oct->droq[q_no]) {
                dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
                        q_no);
                return 1;
        }

        /* Allocate the DS for the new droq. */
        droq = vmalloc_node(sizeof(*droq), numa_node);
        if (!droq)
                droq = vmalloc(sizeof(*droq));
        if (!droq)
                return -1;

        memset(droq, 0, sizeof(struct octeon_droq));

        /* Disable the pkt o/p for this Q */
        octeon_set_droq_pkt_op(oct, q_no, 0);
        oct->droq[q_no] = droq;

        /* Initialize the Droq */
        if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
                vfree(oct->droq[q_no]);
                oct->droq[q_no] = NULL;
                return -1;
        }

        oct->num_oqs++;

        dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
                oct->num_oqs);

        /* Global Droq register settings */

        /* As of now not required, as settings are done for all 32 Droqs at
         * the same time.
         */
        return 0;
}