/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering the dispatch
 * function for a given opcode/subcode.
 * @param octeon_dev - the octeon device pointer.
 * @param opcode     - the opcode for which the dispatch argument
 *                     is to be checked.
 * @param subcode    - the subcode for which the dispatch argument
 *                     is to be checked.
 * @return Success: void * (argument to the dispatch function)
 * @return Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
					    u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}

/** Check for newly arrived packets on a droq.  This function should be
 * called with the droq lock held.
 * @param droq - droq on which the packet count is checked.
 * @return The number of packets that arrived since the last check.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* we shall write to cnts at napi irq enable or end of droq tasklet */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}
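
/* Worked example (illustrative only): the unsigned 32-bit subtraction in
 * octeon_droq_check_hw_for_pkts() stays correct even when the hardware
 * counter wraps.  If the previous snapshot was droq->pkt_count = 0xfffffffe
 * and the register now reads 0x00000003, then
 *
 *	last_count = 0x00000003 - 0xfffffffe = 0x00000005  (mod 2^32)
 *
 * i.e. exactly the five packets that arrived in between.
 */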

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is the max pkt size from Octeon).
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
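
/* Worked example (assuming a hypothetical 2048-byte buffer_size and a
 * 1024-entry ring): the do/while loop above counts 32 descriptors, the
 * minimum needed to hold a 64K packet (32 * 2048 = 64K), so
 * max_empty_descs = 1024 - 32 = 992.  A refill must happen before more
 * than 992 descriptors sit empty, or a maximum-sized packet could not be
 * received safely.
 */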

static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}

static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		droq->info_list[i].length = 0;

		/* map ring buffers into memory */
		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}

int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->info_base_addr)
		lio_free_info_buffer(oct, droq);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);

	return 0;
}

int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else if (OCTEON_CN23XX_VF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	set_dev_node(&oct->pci_dev->dev, numa_node);
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!droq->desc_ring)
		droq->desc_ring =
			lio_dma_alloc(oct, desc_ring_size,
				      (dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->info_list = lio_alloc_info_buffer(oct, droq);
	if (!droq->info_list) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);
		return 1;
	}

	droq->recv_buf_list = (struct octeon_recv_buffer *)
			      vmalloc_node(droq->max_count *
					   OCT_DROQ_RECVBUF_SIZE,
					   numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = (struct octeon_recv_buffer *)
				      vmalloc(droq->max_count *
					      OCT_DROQ_RECVBUF_SIZE);
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	spin_lock_init(&droq->lock);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}
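
/* The allocation pattern in octeon_init_droq() above (and again in
 * octeon_create_droq() below) is "try the preferred NUMA node first, then
 * fall back to any node".  A minimal sketch of the idiom:
 *
 *	ptr = vmalloc_node(size, numa_node);
 *	if (!ptr)
 *		ptr = vmalloc(size);
 *
 * For the DMA'able descriptor ring the same retry is done around
 * lio_dma_alloc(), with set_dev_node() used to steer and then restore the
 * device's node.
 */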

/* octeon_create_recv_info
 * Parameters:
 *   octeon_dev - pointer to the octeon device structure
 *   droq       - droq in which the packet arrived.
 *   buf_cnt    - no. of buffers used by the packet.
 *   idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *   Allocates a recv_info_t and copies the buffer addresses for packet data
 *   into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *   Flags the descriptors for refill later.  If available descriptors go
 *   below the threshold to receive a 64K pkt, new buffers are first
 *   allocated before the recv_pkt_t is created.
 *   This routine will be called in interrupt context.
 * Returns:
 *   Success: Pointer to recv_info_t
 *   Failure: NULL.
 * Locks:
 *   The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = &droq->info_list[idx];

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >= droq->buffer_size) ?
			droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;

	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx =
					incr_index(droq->refill_idx, 1,
						   droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	} /* while */
	return desc_refilled;
}
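
/* Illustration (hypothetical 8-entry ring): if refill_idx = 2, read_idx = 6
 * and only slot 4 still holds an undispatched buffer, the pass above moves
 * slot 4's buffer down to slot 2 so that the descriptors handed back to the
 * hardware are contiguous:
 *
 *	before:  [2]=empty  [3]=empty  [4]=buf    [5]=empty
 *	after:   [2]=buf    [3]=empty  [4]=empty  [5]=empty
 *
 * Only the position of the gap changes; the total number of buffers that
 * still need to be allocated stays the same.
 */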

/* octeon_droq_refill
 * Parameters:
 *   droq - droq in which descriptors require new buffers.
 * Description:
 *   Called during normal DROQ processing in interrupt mode or by the poll
 *   thread to refill the descriptors from which buffers were dispatched
 *   to upper layers.  Attempts to allocate new buffers.  If that fails,
 *   moves up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *   Number of descriptors refilled.
 * Locks:
 *   This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list
				     [droq->refill_idx].buffer);
		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero here: the refill count
	 * would not change in a second pass.  We only moved buffers to close
	 * the gap in the ring; the same number of buffers would still be
	 * left to refill.
	 */
	return desc_refilled;
}

static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	u32 buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;
	return buf_cnt;
}
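
/* Worked example (illustrative): octeon_droq_get_bufcount() computes the
 * ceiling of total_len / buf_size.  With buf_size = 2048 and a 6000-byte
 * packet, the loop body runs three times (6000 > 0, 6000 > 2048,
 * 6000 > 4096), giving buf_cnt = 3; a zero-length packet yields 0.  It is
 * equivalent to DIV_ROUND_UP(total_len, buf_size).
 */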

static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}

static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length -= OCT_RH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
		droq->refill_count += buf_cnt;
	}
}

static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	struct octeon_droq_info *info;
	union octeon_rh *rh;
	u32 pkt, total_len = 0, pkt_count;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* The length of the response header is included in the
		 * received data length.
		 */
		info->length -= OCT_RH_SIZE;
		rh = &info->rh;

		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[idx].buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer =
							NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel(desc_refilled, droq->pkts_credit_reg);
			/* make sure mmio write completes */
			mmiowb();
		}

	} /* for (each packet)... */

	/* Update stats with the number of packets and bytes processed. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return pkts_to_process;
	}

	return pkt;
}
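
/* Sketch (assumed consumer, not part of this file): the fast path above
 * hands completed packets to the callback installed through
 * octeon_register_droq_ops().  A handler in the NIC module matches the
 * shape of the call site, e.g. a hypothetical:
 *
 *	void nic_rx_handler(u32 octeon_id, void *skbuff, u32 len,
 *			    union octeon_rh *rh, void *napi, void *arg);
 *
 * When no fptr is registered, the buffer is simply released with
 * recv_buffer_free().
 */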

int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0, pkts_processed = 0;
	struct list_head *tmp, *tmp2;

	/* Grab the droq lock */
	spin_lock(&droq->lock);

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count) {
		spin_unlock(&droq->lock);
		return 0;
	}

	if (pkt_count > budget)
		pkt_count = budget;

	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

	atomic_sub(pkts_processed, &droq->pkts_pending);

	/* Release the spin lock */
	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets still pending, schedule the tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}

/**
 * Utility function to poll for packets.  check_hw_for_packets must be
 * called before calling this routine.
 */
static int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);

		total_pkts_processed += pkts_processed;
	}

	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}
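
/* Usage sketch (illustrative, hypothetical caller): a NAPI poll handler in
 * the NIC module would typically drive the poll-command dispatcher below
 * roughly like this:
 *
 *	work_done = octeon_process_droq_poll_cmd(oct, q_no,
 *						 POLL_EVENT_PROCESS_PKTS,
 *						 budget);
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		octeon_process_droq_poll_cmd(oct, q_no,
 *					     POLL_EVENT_ENABLE_INTR, 0);
 *	}
 */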

int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
			     u32 arg)
{
	struct octeon_droq *droq;

	droq = oct->droq[q_no];

	if (cmd == POLL_EVENT_PROCESS_PKTS)
		return octeon_droq_process_poll_pkts(oct, droq, arg);

	if (cmd == POLL_EVENT_PENDING_PKTS) {
		u32 pkt_cnt = atomic_read(&droq->pkts_pending);

		return octeon_droq_process_packets(oct, droq, pkt_cnt);
	}

	if (cmd == POLL_EVENT_ENABLE_INTR) {
		u32 value;
		unsigned long flags;

		/* Enable Pkt Interrupt */
		switch (oct->chip_id) {
		case OCTEON_CN66XX:
		case OCTEON_CN68XX: {
			struct octeon_cn6xxx *cn6xxx =
				(struct octeon_cn6xxx *)oct->chip;
			spin_lock_irqsave
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			value = octeon_read_csr(oct,
						CN6XXX_SLI_PKT_TIME_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_TIME_INT_ENB,
					 value);
			value = octeon_read_csr(oct,
						CN6XXX_SLI_PKT_CNT_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_CNT_INT_ENB,
					 value);

			/* don't bother flushing the enables */

			spin_unlock_irqrestore
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			return 0;
		}
		case OCTEON_CN23XX_PF_VID:
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
			break;

		case OCTEON_CN23XX_VF_VID:
			lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
			break;
		}
		return 0;
	}

	dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
	return -EINVAL;
}

int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_droq *droq;
	unsigned long flags;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!ops) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (CFG_GET_OQ_MAX_Q(oct_cfg) - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	spin_lock_irqsave(&droq->lock, flags);

	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	unsigned long flags;
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, CFG_GET_OQ_MAX_Q(oct_cfg) - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	spin_lock_irqsave(&droq->lock, flags);

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}
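
/* Registration sketch (illustrative, hypothetical handler/arg names): a
 * consumer fills a struct octeon_droq_ops and installs it on a queue:
 *
 *	struct octeon_droq_ops ops;
 *
 *	memset(&ops, 0, sizeof(ops));
 *	ops.fptr = nic_rx_handler;
 *	ops.farg = priv;
 *	ops.drop_on_max = 1;
 *	octeon_register_droq_ops(oct, q_no, &ops);
 *
 * octeon_unregister_droq_ops() above clears fptr, farg and drop_on_max
 * again under the same droq->lock.
 */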

int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = cpu_to_node(q_no % num_online_cpus());

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the droq data structure, preferring the local NUMA node. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -1;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable packet output for this queue */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -1;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global droq register settings */

	/* As of now not required, as the settings are done for all 32 droqs
	 * at the same time.
	 */
	return 0;
}
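
/* Usage sketch (illustrative): a caller typically creates an output queue
 * with ring and buffer sizes taken from the chip configuration, e.g.
 * (assuming conf = octeon_get_conf(oct) and interface index ifidx):
 *
 *	if (octeon_create_droq(oct, q_no,
 *			       CFG_GET_NUM_RX_DESCS_NIC_IF(conf, ifidx),
 *			       CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(conf, ifidx),
 *			       NULL))
 *		return -ENOMEM;
 */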