/*
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC(name) SEND_CTXT_##name
/*
 * Send Context functions
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause);

/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
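 * The wait is an unbounded busy-wait in 1 usec steps; the hardware is
 * expected to clear the bit promptly.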
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
	while (1) {
		udelay(1);
		sendctrl = read_csr(dd, SEND_CTRL);
		if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
			break;
	}
}

/* defined in header release 48 and higher */
#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
					<< SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#endif

/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
	u64 reg, mask;
	unsigned long flags;
	int write = 1;	/* write sendctrl back */
	int flush = 0;	/* re-read sendctrl to make sure it is flushed */

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	reg = read_csr(dd, SEND_CTRL);
	switch (op) {
	case PSC_GLOBAL_ENABLE:
		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
		/* Fall through */
	case PSC_DATA_VL_ENABLE:
		/* Disallow sending on VLs not enabled */
		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
		break;
	case PSC_GLOBAL_DISABLE:
		reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_ENABLE:
		reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_DISABLE:
		reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_CM_RESET:
		__cm_reset(dd, reg);
		write = 0; /* CSR already written (and flushed) */
		break;
	case PSC_DATA_VL_DISABLE:
		reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
		flush = 1;
		break;
	default:
		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
		break;
	}

	if (write) {
		write_csr(dd, SEND_CTRL, reg);
		if (flush)
			(void)read_csr(dd, SEND_CTRL); /* flush write */
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/* number of send context memory pools */
#define NUM_SC_POOLS 2

/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL -1
#define SCC_PER_CPU -2
#define SCC_PER_KRCVQ -3

/* Send Context Size (SCS) constants */
#define SCS_ACK_CREDITS 32
#define SCS_VL15_CREDITS 102	/* 3 pkts of 2048B data + 128B header */

#define PIO_THRESHOLD_CEILING 4096

#define PIO_WAIT_BATCH_SIZE 5

/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
	[SC_KERNEL] = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_VL },	/* one per NUMA */
	[SC_ACK]    = { .size  = SCS_ACK_CREDITS,
			.count = SCC_PER_KRCVQ },
	[SC_USER]   = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_CPU },	/* one per CPU */
	[SC_VL15]   = { .size  = SCS_VL15_CREDITS,
			.count = 1 },

};

/* send context memory pool configuration */
struct mem_pool_config {
	int centipercent;	/* % of memory, in 100ths of 1% */
	int absolute_blocks;	/* absolute block count */
};

/* default memory pool configuration: 100% in pool 0 */
static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
	/* centi%, abs blocks */
	{ 10000, -1 },		/* pool 0 */
	{     0, -1 },		/* pool 1 */
};

/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
	int centipercent;	/*
				 * 100th of 1% of memory to use, -1 if blocks
				 * already set
				 */
	int count;		/* count of contexts in the pool */
	int blocks;		/* block size of the pool */
	int size;		/* context size, in blocks */
};

/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and increase negatively.  Map them as:
 *	-1 => 0
 *	-2 => 1
 *	etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{
	if (wc >= 0)
		return -1;	/* non-wildcard */
	return -wc - 1;
}

static const char *sc_type_names[SC_MAX] = {
	"kernel",
	"ack",
	"user",
	"vl15"
};

static const char *sc_type_name(int index)
{
	if (index < 0 || index >= SC_MAX)
		return "unknown";
	return sc_type_names[index];
}

/*
 * Read the send context memory pool configuration and send context
 * size configuration.  Replace any wildcards and come up with final
 * counts and sizes for the send context types.
 */
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
	struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
	int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
	int total_contexts = 0;
	int fixed_blocks;
	int pool_blocks;
	int used_blocks;
	int cp_total;		/* centipercent total */
	int ab_total;		/* absolute block total */
	int extra;
	int i;

	/*
	 * When SDMA is enabled, kernel context pio packet size is capped by
	 * "piothreshold".  Reduce pio buffer allocation for kernel context by
	 * setting it to a fixed size.  The allocation allows 3-deep buffering
	 * of the largest pio packets plus up to 128 bytes header, sufficient
	 * to maintain verbs performance.
	 *
	 * When SDMA is disabled, keep the default pooling allocation.
	 */
	if (HFI1_CAP_IS_KSET(SDMA)) {
		u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
					piothreshold : PIO_THRESHOLD_CEILING;
		sc_config_sizes[SC_KERNEL].size =
			3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
	}

	/*
	 * Step 0:
	 *	- copy the centipercents/absolute sizes from the pool config
	 *	- sanity check these values
	 *	- add up centipercents, then later check for full value
	 *	- add up absolute blocks, then later check for over-commit
	 */
	cp_total = 0;
	ab_total = 0;
	for (i = 0; i < NUM_SC_POOLS; i++) {
		int cp = sc_mem_pool_config[i].centipercent;
		int ab = sc_mem_pool_config[i].absolute_blocks;

		/*
		 * A negative value is "unused" or "invalid".  Both *can*
		 * be valid, but centipercent wins, so check that first
		 */
		if (cp >= 0) {			/* centipercent valid */
			cp_total += cp;
		} else if (ab >= 0) {		/* absolute blocks valid */
			ab_total += ab;
		} else {			/* neither valid */
			dd_dev_err(
				dd,
				"Send context memory pool %d: both the block count and centipercent are invalid\n",
				i);
			return -EINVAL;
		}

		mem_pool_info[i].centipercent = cp;
		mem_pool_info[i].blocks = ab;
	}

	/* do not use both % and absolute blocks for different pools */
	if (cp_total != 0 && ab_total != 0) {
		dd_dev_err(
			dd,
			"All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
		return -EINVAL;
	}

	/* if any percentages are present, they must add up to 100% x 100 */
	if (cp_total != 0 && cp_total != 10000) {
		dd_dev_err(
			dd,
			"Send context memory pool centipercent is %d, expecting 10000\n",
			cp_total);
		return -EINVAL;
	}

	/* the absolute pool total cannot be more than the mem total */
	if (ab_total > total_blocks) {
		dd_dev_err(
			dd,
			"Send context memory pool absolute block count %d is larger than the memory size %d\n",
			ab_total, total_blocks);
		return -EINVAL;
	}

	/*
	 * Step 2:
	 *	- copy from the context size config
	 *	- replace context type wildcard counts with real values
	 *	- add up non-memory pool block sizes
	 *	- add up memory pool user counts
	 */
	fixed_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		int count = sc_config_sizes[i].count;
		int size = sc_config_sizes[i].size;
		int pool;

		/*
		 * Sanity check count: Either a positive value or
		 * one of the expected wildcards is valid.  The positive
		 * value is checked later when we compare against total
		 * memory available.
		 */
		if (i == SC_ACK) {
			count = dd->n_krcv_queues;
		} else if (i == SC_KERNEL) {
			count = INIT_SC_PER_VL * num_vls;
		} else if (count == SCC_PER_CPU) {
			count = dd->num_rcv_contexts - dd->n_krcv_queues;
		} else if (count < 0) {
			dd_dev_err(
				dd,
				"%s send context invalid count wildcard %d\n",
				sc_type_name(i), count);
			return -EINVAL;
		}
		if (total_contexts + count > chip_send_contexts(dd))
			count = chip_send_contexts(dd) - total_contexts;

		total_contexts += count;

		/*
		 * Sanity check pool: The conversion will return a pool
		 * number or -1 if a fixed (non-negative) value.  The fixed
		 * value is checked later when we compare against
		 * total memory available.
		 */
		pool = wildcard_to_pool(size);
		if (pool == -1) {			/* non-wildcard */
			fixed_blocks += size * count;
		} else if (pool < NUM_SC_POOLS) {	/* valid wildcard */
			mem_pool_info[pool].count += count;
		} else {				/* invalid wildcard */
			dd_dev_err(
				dd,
				"%s send context invalid pool wildcard %d\n",
				sc_type_name(i), size);
			return -EINVAL;
		}

		dd->sc_sizes[i].count = count;
		dd->sc_sizes[i].size = size;
	}
	if (fixed_blocks > total_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed block count, %u, larger than total block count %u\n",
			fixed_blocks, total_blocks);
		return -EINVAL;
	}

	/* step 3: calculate the blocks in the pools, and pool context sizes */
	pool_blocks = total_blocks - fixed_blocks;
	if (ab_total > pool_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed pool sizes, %u, larger than pool block count %u\n",
			ab_total, pool_blocks);
		return -EINVAL;
	}
	/* subtract off the fixed pool blocks */
	pool_blocks -= ab_total;

	for (i = 0; i < NUM_SC_POOLS; i++) {
		struct mem_pool_info *pi = &mem_pool_info[i];

		/* % beats absolute blocks */
		if (pi->centipercent >= 0)
			pi->blocks = (pool_blocks * pi->centipercent) / 10000;

		if (pi->blocks == 0 && pi->count != 0) {
			dd_dev_err(
				dd,
				"Send context memory pool %d has %u contexts, but no blocks\n",
				i, pi->count);
			return -EINVAL;
		}
		if (pi->count == 0) {
			/* warn about wasted blocks */
			if (pi->blocks != 0)
				dd_dev_err(
					dd,
					"Send context memory pool %d has %u blocks, but zero contexts\n",
					i, pi->blocks);
			pi->size = 0;
		} else {
			pi->size = pi->blocks / pi->count;
		}
	}

	/* step 4: fill in the context type sizes from the pool sizes */
	used_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		if (dd->sc_sizes[i].size < 0) {
			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);

			WARN_ON_ONCE(pool >= NUM_SC_POOLS);
			dd->sc_sizes[i].size = mem_pool_info[pool].size;
		}
		/* make sure we are not larger than what is allowed by the HW */
#define PIO_MAX_BLOCKS 1024
		if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
			dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

		/* calculate our total usage */
		used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
	}
	extra = total_blocks - used_blocks;
	if (extra != 0)
		dd_dev_info(dd, "unused send context blocks: %d\n", extra);

	return total_contexts;
}

int init_send_contexts(struct hfi1_devdata *dd)
{
	u16 base;
	int ret, i, j, context;

	ret = init_credit_return(dd);
	if (ret)
		return ret;

	dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
				     GFP_KERNEL);
	dd->send_contexts = kcalloc(dd->num_send_contexts,
				    sizeof(struct send_context_info),
				    GFP_KERNEL);
	if (!dd->send_contexts || !dd->hw_to_sw) {
		kfree(dd->hw_to_sw);
		kfree(dd->send_contexts);
		free_credit_return(dd);
		return -ENOMEM;
	}

	/* hardware context map starts with invalid send context indices */
	for (i = 0; i < TXE_NUM_CONTEXTS; i++)
		dd->hw_to_sw[i] = INVALID_SCI;

	/*
	 * All send contexts have their credit sizes.  Allocate credits
	 * for each context one after another from the global space.
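	 * The running block base starts at 1; this is consistent with the
	 * one block held back from total_blocks in init_sc_pools_and_sizes().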
	 */
	context = 0;
	base = 1;
	for (i = 0; i < SC_MAX; i++) {
		struct sc_config_sizes *scs = &dd->sc_sizes[i];

		for (j = 0; j < scs->count; j++) {
			struct send_context_info *sci =
						&dd->send_contexts[context];
			sci->type = i;
			sci->base = base;
			sci->credits = scs->size;

			context++;
			base += scs->size;
		}
	}

	return 0;
}

/*
 * Allocate a software index and hardware context of the given type.
 *
 * Must be called with dd->sc_lock held.
 */
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
		       u32 *hw_context)
{
	struct send_context_info *sci;
	u32 index;
	u32 context;

	for (index = 0, sci = &dd->send_contexts[0];
	     index < dd->num_send_contexts; index++, sci++) {
		if (sci->type == type && sci->allocated == 0) {
			sci->allocated = 1;
			/* use a 1:1 mapping, but make them non-equal */
			context = chip_send_contexts(dd) - index - 1;
			dd->hw_to_sw[context] = index;
			*sw_index = index;
			*hw_context = context;
			return 0; /* success */
		}
	}
	dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
	return -ENOSPC;
}

/*
 * Free the send context given by its software index.
 *
 * Must be called with dd->sc_lock held.
 */
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{
	struct send_context_info *sci;

	sci = &dd->send_contexts[sw_index];
	if (!sci->allocated) {
		dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
			   __func__, sw_index, hw_context);
	}
	sci->allocated = 0;
	dd->hw_to_sw[hw_context] = INVALID_SCI;
}

/* return the base context of a context in a group */
static inline u32 group_context(u32 context, u32 group)
{
	return (context >> group) << group;
}

/* return the size of a group */
static inline u32 group_size(u32 group)
{
	return 1 << group;
}

/*
 * Obtain the credit return addresses, kernel virtual and bus, for the
 * given sc.
 *
 * To understand this routine:
 * o va and dma are arrays of struct credit_return.  One for each physical
 *   send context, per NUMA.
 * o Each send context always looks in its relative location in a struct
 *   credit_return for its credit return.
 * o Each send context in a group must have its return address CSR programmed
 *   with the same value.  Use the address of the first send context in the
 *   group.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
	u32 gc = group_context(sc->hw_context, sc->group);
	u32 index = sc->hw_context & 0x7;

	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
	*dma = (unsigned long)
	       &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}

/*
 * Work queue function triggered in error interrupt routine for
 * kernel contexts.
 */
static void sc_halted(struct work_struct *work)
{
	struct send_context *sc;

	sc = container_of(work, struct send_context, halt_work);
	sc_restart(sc);
}

/*
 * Calculate PIO block threshold for this send context using the given MTU.
 * Trigger a return when one MTU plus optional header of credits remain.
 *
 * Parameter mtu is in bytes.
 * Parameter hdrqentsize is in DWORDs.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
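 *
 * For example (hypothetical values, assuming a 64-byte PIO block): an
 * 8192-byte MTU with a 32-DWORD header needs
 * DIV_ROUND_UP(8192 + 128, 64) = 130 blocks, so a context with 160
 * credits would use a threshold of 160 - 130 = 30.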
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
	u32 release_credits;
	u32 threshold;

	/* add in the header size, then divide by the PIO block size */
	mtu += hdrqentsize << 2;
	release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

	/* check against this context's credits */
	if (sc->credits <= release_credits)
		threshold = 1;
	else
		threshold = sc->credits - release_credits;

	return threshold;
}

/*
 * Calculate credit threshold in terms of percent of the allocated credits.
 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
	return (sc->credits * percent) / 100;
}

/*
 * Set the credit return threshold.
 */
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
	unsigned long flags;
	u32 old_threshold;
	int force_return = 0;

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

	old_threshold = (sc->credit_ctrl >>
				SC(CREDIT_CTRL_THRESHOLD_SHIFT))
			 & SC(CREDIT_CTRL_THRESHOLD_MASK);

	if (new_threshold != old_threshold) {
		sc->credit_ctrl =
			(sc->credit_ctrl
				& ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
			| ((new_threshold
				& SC(CREDIT_CTRL_THRESHOLD_MASK))
			   << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);

		/* force a credit return on change to avoid a possible stall */
		force_return = 1;
	}

	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

	if (force_return)
		sc_return_credits(sc);
}

/*
 * set_pio_integrity
 *
 * Set the CHECK_ENABLE register for the send context 'sc'.
 */
void set_pio_integrity(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u32 hw_context = sc->hw_context;
	int type = sc->type;

	write_kctxt_csr(dd, hw_context,
			SC(CHECK_ENABLE),
			hfi1_pkt_default_send_ctxt_mask(dd, type));
}

static u32 get_buffers_allocated(struct send_context *sc)
{
	int cpu;
	u32 ret = 0;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
	return ret;
}

static void reset_buffers_allocated(struct send_context *sc)
{
	int cpu;

	for_each_possible_cpu(cpu)
		(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}

/*
 * Allocate a NUMA relative send context structure of the given type along
 * with a HW context.
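 *
 * Returns NULL on any failure: a frozen device, a memory allocation
 * failure, or no free hardware context of the requested type.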
 */
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
			      uint hdrqentsize, int numa)
{
	struct send_context_info *sci;
	struct send_context *sc = NULL;
	dma_addr_t dma;
	unsigned long flags;
	u64 reg;
	u32 thresh;
	u32 sw_index;
	u32 hw_context;
	int ret;
	u8 opval, opmask;

	/* do not allocate while frozen */
	if (dd->flags & HFI1_FROZEN)
		return NULL;

	sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
	if (!sc)
		return NULL;

	sc->buffers_allocated = alloc_percpu(u32);
	if (!sc->buffers_allocated) {
		kfree(sc);
		dd_dev_err(dd,
			   "Cannot allocate buffers_allocated per cpu counters\n"
			  );
		return NULL;
	}

	spin_lock_irqsave(&dd->sc_lock, flags);
	ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
	if (ret) {
		spin_unlock_irqrestore(&dd->sc_lock, flags);
		free_percpu(sc->buffers_allocated);
		kfree(sc);
		return NULL;
	}

	sci = &dd->send_contexts[sw_index];
	sci->sc = sc;

	sc->dd = dd;
	sc->node = numa;
	sc->type = type;
	spin_lock_init(&sc->alloc_lock);
	spin_lock_init(&sc->release_lock);
	spin_lock_init(&sc->credit_ctrl_lock);
	INIT_LIST_HEAD(&sc->piowait);
	INIT_WORK(&sc->halt_work, sc_halted);
	init_waitqueue_head(&sc->halt_wait);

	/* grouping is always single context for now */
	sc->group = 0;

	sc->sw_index = sw_index;
	sc->hw_context = hw_context;
	cr_group_addresses(sc, &dma);
	sc->credits = sci->credits;
	sc->size = sc->credits * PIO_BLOCK_SIZE;

/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16
	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
					<< PIO_ADDR_CONTEXT_SHIFT);

	/* set base and credits */
	reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
					<< SC(CTRL_CTXT_DEPTH_SHIFT))
		| ((sci->base & SC(CTRL_CTXT_BASE_MASK))
					<< SC(CTRL_CTXT_BASE_SHIFT));
	write_kctxt_csr(dd, hw_context, SC(CTRL), reg);

	set_pio_integrity(sc);

	/* unmask all errors */
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);

	/* set the default partition key */
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
			(SC(CHECK_PARTITION_KEY_VALUE_MASK) &
			 DEFAULT_PKEY) <<
			SC(CHECK_PARTITION_KEY_VALUE_SHIFT));

	/* per context type checks */
	if (type == SC_USER) {
		opval = USER_OPCODE_CHECK_VAL;
		opmask = USER_OPCODE_CHECK_MASK;
	} else {
		opval = OPCODE_CHECK_VAL_DISABLED;
		opmask = OPCODE_CHECK_MASK_DISABLED;
	}

	/* set the send context check opcode mask and value */
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
			((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
			((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));

	/* set up credit return */
	reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);

	/*
	 * Calculate the initial credit return threshold.
	 *
	 * For Ack contexts, set a threshold for half the credits.
	 * For User contexts use the given percentage.  This has been
	 * sanitized on driver start-up.
	 * For Kernel contexts, use the default MTU plus a header
	 * or half the credits, whichever is smaller.  This should
	 * work for both the 3-deep buffering allocation and the
	 * pooling allocation.
	 */
	if (type == SC_ACK) {
		thresh = sc_percent_to_threshold(sc, 50);
	} else if (type == SC_USER) {
		thresh = sc_percent_to_threshold(sc,
						 user_credit_return_threshold);
	} else { /* kernel */
		thresh = min(sc_percent_to_threshold(sc, 50),
			     sc_mtu_to_threshold(sc, hfi1_max_mtu,
						 hdrqentsize));
	}
	reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
	/* add in early return */
	if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
	else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);

	/* set up write-through credit_ctrl */
	sc->credit_ctrl = reg;
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);

	/* User send contexts should not allow sending on VL15 */
	if (type == SC_USER) {
		reg = 1ULL << 15;
		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
	}

	spin_unlock_irqrestore(&dd->sc_lock, flags);

	/*
	 * Allocate shadow ring to track outstanding PIO buffers _after_
	 * unlocking.  We don't know the size until the lock is held and
	 * we can't allocate while the lock is held.  No one is using
	 * the context yet, so allocate it now.
	 *
	 * User contexts do not get a shadow ring.
	 */
	if (type != SC_USER) {
		/*
		 * Size the shadow ring 1 larger than the number of credits
		 * so head == tail can mean empty.
		 */
		sc->sr_size = sci->credits + 1;
		sc->sr = kcalloc_node(sc->sr_size,
				      sizeof(union pio_shadow_ring),
				      GFP_KERNEL, numa);
		if (!sc->sr) {
			sc_free(sc);
			return NULL;
		}
	}

	hfi1_cdbg(PIO,
		  "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
		  sw_index,
		  hw_context,
		  sc_type_name(type),
		  sc->group,
		  sc->credits,
		  sc->credit_ctrl,
		  thresh);

	return sc;
}

/* free a per-NUMA send context structure */
void sc_free(struct send_context *sc)
{
	struct hfi1_devdata *dd;
	unsigned long flags;
	u32 sw_index;
	u32 hw_context;

	if (!sc)
		return;

	sc->flags |= SCF_IN_FREE;	/* ensure no restarts */
	dd = sc->dd;
	if (!list_empty(&sc->piowait))
		dd_dev_err(dd, "piowait list not empty!\n");
	sw_index = sc->sw_index;
	hw_context = sc->hw_context;
	sc_disable(sc);	/* make sure the HW is disabled */
	flush_work(&sc->halt_work);

	spin_lock_irqsave(&dd->sc_lock, flags);
	dd->send_contexts[sw_index].sc = NULL;

	/* clear/disable all registers set in sc_alloc */
	write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);

	/* release the index and context for re-use */
	sc_hw_free(dd, sw_index, hw_context);
	spin_unlock_irqrestore(&dd->sc_lock, flags);

	kfree(sc->sr);
	free_percpu(sc->buffers_allocated);
	kfree(sc);
}

/* disable the context */
void sc_disable(struct send_context *sc)
{
	u64 reg;
	unsigned long flags;
	struct pio_buf *pbuf;

	if (!sc)
		return;

	/* do all steps, even if already disabled */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
	reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
	sc->flags &= ~SCF_ENABLED;
	sc_wait_for_packet_egress(sc, 1);
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	/*
	 * Flush any waiters.  Once the context is disabled,
	 * credit return interrupts are stopped (although there
	 * could be one in-process when the context is disabled).
	 * Wait one microsecond for any lingering interrupts, then
	 * proceed with the flush.
	 */
	udelay(1);
	spin_lock_irqsave(&sc->release_lock, flags);
	if (sc->sr) {	/* this context has a shadow ring */
		while (sc->sr_tail != sc->sr_head) {
			pbuf = &sc->sr[sc->sr_tail].pbuf;
			if (pbuf->cb)
				(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
			sc->sr_tail++;
			if (sc->sr_tail >= sc->sr_size)
				sc->sr_tail = 0;
		}
	}
	spin_unlock_irqrestore(&sc->release_lock, flags);
}

/* return SendEgressCtxtStatus.PacketOccupancy */
static u64 packet_occupancy(u64 reg)
{
	return (reg &
		SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
		>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
}

/* is egress halted on the context? */
static bool egress_halted(u64 reg)
{
	return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
}

/* is the send context halted? */
static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
{
	return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
		  SC(STATUS_CTXT_HALTED_SMASK));
}

/**
 * sc_wait_for_packet_egress
 * @sc: valid send context
 * @pause: wait for credit return
 *
 * Wait for packet egress, optionally pause for credit return
 *
 * Egress halt and Context halt are not necessarily the same thing, so
 * check for both.
 *
 * NOTE: The context halt bit may not be set immediately.  Because of this,
 * it is necessary to check the SW SCF_HALTED bit (set in the IRQ) and the HW
 * context bit to determine if the context is halted.
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg = 0;
	u64 reg_prev;
	u32 loop = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, sc->hw_context * 8 +
			       SEND_EGRESS_CTXT_STATUS);
		/* done if any halt bits, SW or HW are set */
		if (sc->flags & SCF_HALTED ||
		    is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
			break;
		reg = packet_occupancy(reg);
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			loop = 0;
		if (loop > 50000) {
			/* timed out - bounce the link */
			dd_dev_err(dd,
				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sc->sw_index,
				   sc->hw_context, (u32)reg);
			queue_work(dd->pport->link_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		loop++;
		udelay(1);
	}

	if (pause)
		/* Add additional delay to ensure chip returns all credits */
		pause_for_credit_return(dd);
}

void sc_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		struct send_context *sc = dd->send_contexts[i].sc;

		if (!sc)
			continue;
		sc_wait_for_packet_egress(sc, 0);
	}
}

/*
 * Restart a context after it has been halted due to error.
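 * Normally invoked from sc_halted(), the work queue function queued by the
 * error interrupt via halt_work.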
 *
 * If the first step, waiting for the context to actually halt, fails,
 * return early.  Otherwise complain about timeouts but keep going.
 *
 * It is expected that allocations (enabled flag bit) have been shut off
 * already (only applies to kernel contexts).
 */
int sc_restart(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg;
	u32 loop;
	int count;

	/* bounce off if not halted, or being freed */
	if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
		return -EINVAL;

	dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
		    sc->hw_context);

	/*
	 * Step 1: Wait for the context to actually halt.
	 *
	 * The error interrupt is asynchronous to actually setting halt
	 * on the context.
	 */
	loop = 0;
	while (1) {
		reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
		if (reg & SC(STATUS_CTXT_HALTED_SMASK))
			break;
		if (loop > 100) {
			dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
				   __func__, sc->sw_index, sc->hw_context);
			return -ETIME;
		}
		loop++;
		udelay(1);
	}

	/*
	 * Step 2: Ensure no users are still trying to write to PIO.
	 *
	 * For kernel contexts, we have already turned off buffer allocation.
	 * Now wait for the buffer count to go to zero.
	 *
	 * For user contexts, the user handling code has cut off write access
	 * to the context's PIO pages before calling this routine and will
	 * restore write access after this routine returns.
	 */
	if (sc->type != SC_USER) {
		/* kernel context */
		loop = 0;
		while (1) {
			count = get_buffers_allocated(sc);
			if (count == 0)
				break;
			if (loop > 100) {
				dd_dev_err(dd,
					   "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
					   __func__, sc->sw_index,
					   sc->hw_context, count);
			}
			loop++;
			udelay(1);
		}
	}

	/*
	 * Step 3: Wait for all packets to egress.
	 * This is done while disabling the send context
	 *
	 * Step 4: Disable the context
	 *
	 * This is a superset of the halt.  After the disable, the
	 * errors can be cleared.
	 */
	sc_disable(sc);

	/*
	 * Step 5: Enable the context
	 *
	 * This enable will clear the halted flag and per-send context
	 * error flags.
	 */
	return sc_enable(sc);
}

/*
 * PIO freeze processing.  To be called after the TXE block is fully frozen.
 * Go through all frozen send contexts and disable them.  The contexts are
 * already stopped by the freeze.
 */
void pio_freeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		/*
		 * Don't disable unallocated, unfrozen, or user send contexts.
		 * User send contexts will be disabled when the process
		 * calls into the driver to reset its context.
		 */
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		/* only need to disable, the context is already stopped */
		sc_disable(sc);
	}
}

/*
 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
 * is that all PIO send contexts have been disabled and the SPC freeze has
 * been cleared.  Now perform the last step and re-enable each kernel context.
 * User (PSM) processing will occur when PSM calls into the kernel to
 * acknowledge the freeze.
 */
void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		sc_enable(sc);	/* will clear the sc frozen flag */
	}
}

/*
 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
 * Returns:
 *	-ETIMEDOUT - if we wait too long
 *	-EIO	   - if there was an error
 */
static int pio_init_wait_progress(struct hfi1_devdata *dd)
{
	u64 reg;
	int max, count = 0;

	/* max is the longest possible HW init time / delay */
	max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
	while (1) {
		reg = read_csr(dd, SEND_PIO_INIT_CTXT);
		if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
			break;
		if (count >= max)
			return -ETIMEDOUT;
		udelay(5);
		count++;
	}

	return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
}

/*
 * Reset all of the send contexts to their power-on state.  Used
 * only during manual init - no lock against sc_enable needed.
 */
void pio_reset_all(struct hfi1_devdata *dd)
{
	int ret;

	/* make sure the init engine is not busy */
	ret = pio_init_wait_progress(dd);
	/* ignore any timeout */
	if (ret == -EIO) {
		/* clear the error */
		write_csr(dd, SEND_PIO_ERR_CLEAR,
			  SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
	}

	/* reset init all */
	write_csr(dd, SEND_PIO_INIT_CTXT,
		  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
	udelay(2);
	ret = pio_init_wait_progress(dd);
	if (ret < 0) {
		dd_dev_err(dd,
			   "PIO send context init %s while initializing all PIO blocks\n",
			   ret == -ETIMEDOUT ? "is stuck" : "had an error");
	}
}

/* enable the context */
int sc_enable(struct send_context *sc)
{
	u64 sc_ctrl, reg, pio;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int ret = 0;

	if (!sc)
		return -EINVAL;
	dd = sc->dd;

	/*
	 * Obtain the allocator lock to guard against any allocation
	 * attempts (which should not happen prior to context being
	 * enabled).  On the release/disable side we don't need to
	 * worry about locking since the releaser will not do anything
	 * if the context accounting values have not changed.
	 */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
		goto unlock; /* already enabled */

	/* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */

	*sc->hw_free = 0;
	sc->free = 0;
	sc->alloc_free = 0;
	sc->fill = 0;
	sc->fill_wrap = 0;
	sc->sr_head = 0;
	sc->sr_tail = 0;
	sc->flags = 0;
	/* the alloc lock ensures no fast path allocation */
	reset_buffers_allocated(sc);

	/*
	 * Clear all per-context errors.  Some of these will be set when
	 * we are re-enabling after a context halt.  Now that the context
	 * is disabled, the halt will not clear until after the PIO init
	 * engine runs below.
	 */
	reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
	if (reg)
		write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);

	/*
	 * The HW PIO initialization engine can handle only one init
	 * request at a time.  Serialize access to each device's engine.
	 */
	spin_lock(&dd->sc_init_lock);
	/*
	 * Since access to this code block is serialized and
	 * each access waits for the initialization to complete
	 * before releasing the lock, the PIO initialization engine
	 * should not be in use, so we don't have to wait for the
	 * InProgress bit to go down.
	 */
	pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
	       SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
		SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
	write_csr(dd, SEND_PIO_INIT_CTXT, pio);
	/*
	 * Wait until the engine is done.  Give the chip the required time
	 * so, hopefully, we read the register just once.
	 */
	udelay(2);
	ret = pio_init_wait_progress(dd);
	spin_unlock(&dd->sc_init_lock);
	if (ret) {
		dd_dev_err(dd,
			   "sctxt%u(%u): Context not enabled due to init failure %d\n",
			   sc->sw_index, sc->hw_context, ret);
		goto unlock;
	}

	/*
	 * All is well. Enable the context.
	 */
	sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
	write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
	/*
	 * Read SendCtxtCtrl to force the write out and prevent a timing
	 * hazard where a PIO write may reach the context before the enable.
	 */
	read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	sc->flags |= SCF_ENABLED;

unlock:
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	return ret;
}

/* force a credit return on the context */
void sc_return_credits(struct send_context *sc)
{
	if (!sc)
		return;

	/* a 0->1 transition schedules a credit return */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
			SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
	/*
	 * Ensure that the write is flushed and the credit return is
	 * scheduled.  We care more about the 0 -> 1 transition.
	 */
	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
	/* set back to 0 for next time */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}

/* allow all in-flight packets to drain on the context */
void sc_flush(struct send_context *sc)
{
	if (!sc)
		return;

	sc_wait_for_packet_egress(sc, 1);
}

/* drop all packets on the context, no waiting until they are sent */
void sc_drop(struct send_context *sc)
{
	if (!sc)
		return;

	dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
		    __func__, sc->sw_index, sc->hw_context);
}

/*
 * Start the software reaction to a context halt or SPC freeze:
 *  - mark the context as halted or frozen
 *  - stop buffer allocations
 *
 * Called from the error interrupt.  Other work is deferred until
 * out of the interrupt.
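 *
 * Waiters on halt_wait are woken so they can observe the new state.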
 */
void sc_stop(struct send_context *sc, int flag)
{
	unsigned long flags;

	/* mark the context */
	sc->flags |= flag;

	/* stop buffer allocations */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc->flags &= ~SCF_ENABLED;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);
	wake_up(&sc->halt_wait);
}

#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)

/*
 * The send context buffer "allocator".
 *
 * @sc: the PIO send context we are allocating from
 * @dw_len: length of whole packet - including PBC - in dwords
 * @cb: optional callback to call when the buffer is finished sending
 * @arg: argument for cb
 *
 * Return a pointer to a PIO buffer if successful, NULL if not enough room.
 */
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
				pio_release_cb cb, void *arg)
{
	struct pio_buf *pbuf = NULL;
	unsigned long flags;
	unsigned long avail;
	unsigned long blocks = dwords_to_blocks(dw_len);
	u32 fill_wrap;
	int trycount = 0;
	u32 head, next;

	spin_lock_irqsave(&sc->alloc_lock, flags);
	if (!(sc->flags & SCF_ENABLED)) {
		spin_unlock_irqrestore(&sc->alloc_lock, flags);
		goto done;
	}

retry:
	avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
	if (blocks > avail) {
		/* not enough room */
		if (unlikely(trycount))	{ /* already tried to get more room */
			spin_unlock_irqrestore(&sc->alloc_lock, flags);
			goto done;
		}
		/* copy from receiver cache line and recalculate */
		sc->alloc_free = READ_ONCE(sc->free);
		avail =
			(unsigned long)sc->credits -
			(sc->fill - sc->alloc_free);
		if (blocks > avail) {
			/* still no room, actively update */
			sc_release_update(sc);
			sc->alloc_free = READ_ONCE(sc->free);
			trycount++;
			goto retry;
		}
	}

	/* there is enough room */

	preempt_disable();
	this_cpu_inc(*sc->buffers_allocated);

	/* read this once */
	head = sc->sr_head;

	/* "allocate" the buffer */
	sc->fill += blocks;
	fill_wrap = sc->fill_wrap;
	sc->fill_wrap += blocks;
	if (sc->fill_wrap >= sc->credits)
		sc->fill_wrap = sc->fill_wrap - sc->credits;

	/*
	 * Fill the parts that the releaser looks at before moving the head.
	 * The only necessary piece is the sent_at field.  The credits
	 * we have just allocated cannot have been returned yet, so the
	 * cb and arg will not be looked at for a "while".  Put them
	 * on this side of the memory barrier anyway.
	 */
	pbuf = &sc->sr[head].pbuf;
	pbuf->sent_at = sc->fill;
	pbuf->cb = cb;
	pbuf->arg = arg;
	pbuf->sc = sc;	/* could be filled in at sc->sr init time */
	/* make sure this is in memory before updating the head */

	/* calculate next head index, do not store */
	next = head + 1;
	if (next >= sc->sr_size)
		next = 0;
	/*
	 * update the head - must be last! - the releaser can look at fields
	 * in pbuf once we move the head
	 */
	smp_wmb();
	sc->sr_head = next;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	/* finish filling in the buffer outside the lock */
	pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
	pbuf->end = sc->base_addr + sc->size;
	pbuf->qw_written = 0;
	pbuf->carry_bytes = 0;
	pbuf->carry.val64 = 0;
done:
	return pbuf;
}

/*
 * There are at least two entities that can turn on credit return
 * interrupts and they can overlap.  Avoid problems by implementing
 * a count scheme that is enforced by a lock.  The lock is needed because
 * the count and CSR write must be paired.
 */

/*
 * Start credit return interrupts.  This is managed by a count.  If already
 * on, just increment the count.
 */
void sc_add_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	/* lock must surround both the count change and the CSR update */
	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	sc->credit_intr_count++;
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

/*
 * Stop credit return interrupts.  This is managed by a count.  Decrement the
 * count, if the last user, then turn the credit interrupts off.
 */
void sc_del_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	WARN_ON(sc->credit_intr_count == 0);

	/* lock must surround both the count change and the CSR update */
	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	sc->credit_intr_count--;
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

/*
 * The caller must be careful when calling this.  All needint calls
 * must be paired with !needint.
 */
void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
{
	if (needint)
		sc_add_credit_return_intr(sc);
	else
		sc_del_credit_return_intr(sc);
	trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
	if (needint) {
		mmiowb();
		sc_return_credits(sc);
	}
}

/**
 * sc_piobufavail - callback when a PIO buffer is available
 * @sc: the send context
 *
 * This is called from the interrupt handler when a PIO buffer is
 * available after hfi1_verbs_send() returned an error that no buffers were
 * available.  Disable the interrupt if there are no more QPs waiting.
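 *
 * At most PIO_WAIT_BATCH_SIZE QPs are pulled off the wait list per call,
 * and the most starved QP is woken first.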
 */
static void sc_piobufavail(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	uint i, n = 0, max_idx = 0;
	u8 max_starved_cnt = 0;

	if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
	    dd->send_contexts[sc->sw_index].type != SC_VL15)
		return;
	list = &sc->piowait;
	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	write_seqlock_irqsave(&dev->iowait_lock, flags);
	while (!list_empty(list)) {
		struct iowait *wait;

		if (n == ARRAY_SIZE(qps))
			break;
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		iowait_starve_find_max(wait, &max_starved_cnt, n, &max_idx);
		/* refcount held until actual wake up */
		qps[n++] = qp;
	}
	/*
	 * If there had been waiters and there are more,
	 * ensure that we redo the force to avoid a potential hang.
	 */
	if (n) {
		hfi1_sc_wantpiobuf_intr(sc, 0);
		if (!list_empty(list))
			hfi1_sc_wantpiobuf_intr(sc, 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	/* Wake up the most starved one first */
	if (n)
		hfi1_qp_wakeup(qps[max_idx],
			       RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
	for (i = 0; i < n; i++)
		if (i != max_idx)
			hfi1_qp_wakeup(qps[i],
				       RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}

/* translate a send credit update to a bit code of reasons */
static inline int fill_code(u64 hw_free)
{
	int code = 0;

	if (hw_free & CR_STATUS_SMASK)
		code |= PRC_STATUS_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
		code |= PRC_PBC;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
		code |= PRC_THRESHOLD;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
		code |= PRC_FILL_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
		code |= PRC_SC_DISABLE;
	return code;
}

/* use the jiffies compare to get the wrap right */
#define sent_before(a, b) time_before(a, b)	/* a < b */

/*
 * The send context buffer "releaser".
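 *
 * The hardware reports only the low CR_COUNTER bits of its running free
 * count, so the delta since the last update is computed modulo that
 * counter width and added to the full software count in sc->free.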
 */
void sc_release_update(struct send_context *sc)
{
	struct pio_buf *pbuf;
	u64 hw_free;
	u32 head, tail;
	unsigned long old_free;
	unsigned long free;
	unsigned long extra;
	unsigned long flags;
	int code;

	if (!sc)
		return;

	spin_lock_irqsave(&sc->release_lock, flags);
	/* update free */
	hw_free = le64_to_cpu(*sc->hw_free);		/* volatile read */
	old_free = sc->free;
	extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
			- (old_free & CR_COUNTER_MASK))
				& CR_COUNTER_MASK;
	free = old_free + extra;
	trace_hfi1_piofree(sc, extra);

	/* call sent buffer callbacks */
	code = -1;				/* code not yet set */
	head = READ_ONCE(sc->sr_head);		/* snapshot the head */
	tail = sc->sr_tail;
	while (head != tail) {
		pbuf = &sc->sr[tail].pbuf;

		if (sent_before(free, pbuf->sent_at)) {
			/* not sent yet */
			break;
		}
		if (pbuf->cb) {
			if (code < 0) /* fill in code on first user */
				code = fill_code(hw_free);
			(*pbuf->cb)(pbuf->arg, code);
		}

		tail++;
		if (tail >= sc->sr_size)
			tail = 0;
	}
	sc->sr_tail = tail;
	/* make sure tail is updated before free */
	smp_wmb();
	sc->free = free;
	spin_unlock_irqrestore(&sc->release_lock, flags);
	sc_piobufavail(sc);
}

/*
 * Send context group releaser.  Argument is the send context that caused
 * the interrupt.  Called from the send context interrupt handler.
 *
 * Call release on all contexts in the group.
 *
 * This routine takes the sc_lock without an irqsave because it is only
 * called from an interrupt handler.  Adjust if that changes.
 */
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
{
	struct send_context *sc;
	u32 sw_index;
	u32 gc, gc_end;

	spin_lock(&dd->sc_lock);
	sw_index = dd->hw_to_sw[hw_context];
	if (unlikely(sw_index >= dd->num_send_contexts)) {
		dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
			   __func__, hw_context, sw_index);
		goto done;
	}
	sc = dd->send_contexts[sw_index].sc;
	if (unlikely(!sc))
		goto done;

	gc = group_context(hw_context, sc->group);
	gc_end = gc + group_size(sc->group);
	for (; gc < gc_end; gc++) {
		sw_index = dd->hw_to_sw[gc];
		if (unlikely(sw_index >= dd->num_send_contexts)) {
			dd_dev_err(dd,
				   "%s: invalid hw (%u) to sw (%u) mapping\n",
				   __func__, hw_context, sw_index);
			continue;
		}
		sc_release_update(dd->send_contexts[sw_index].sc);
	}
done:
	spin_unlock(&dd->sc_lock);
}

/*
 * pio_select_send_context_vl() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns a send context based on the selector and a vl.
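 * An out-of-range vl or a missing map falls back to the VL0 send context.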
 * The mapping fields are protected by RCU.
 */
struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
						u32 selector, u8 vl)
{
	struct pio_vl_map *m;
	struct pio_map_elem *e;
	struct send_context *rval;

	/*
	 * NOTE This should only happen if SC->VL changed after the initial
	 * checks on the QP/AH
	 * Default will return VL0's send context below
	 */
	if (unlikely(vl >= num_vls)) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->pio_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return dd->vld[0].sc;
	}
	e = m->map[vl & m->mask];
	rval = e->ksc[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? dd->vld[0].sc : rval;
	return rval;
}

/*
 * pio_select_send_context_sc() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns a send context based on the selector and an sc.
 */
struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
						u32 selector, u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return pio_select_send_context_vl(dd, selector, vl);
}

/*
 * Free the indicated map struct
 */
static void pio_map_free(struct pio_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void pio_map_rcu_callback(struct rcu_head *list)
{
	struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);

	pio_map_free(m);
}

/*
 * Set credit return threshold for the kernel send context
 */
static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
{
	u32 thres;

	thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
					    50),
		    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
					dd->vld[i].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
}

/*
 * pio_map_init - called when #vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_scontexts: per vl send context mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_scontexts is used to specify a non-uniform vl/send context
 * loading.  NULL implies auto computing the loading and giving each
 * VL a uniform distribution of send contexts per VL.
 *
 * The auto algorithm computes the sc_per_vl and the number of extra
 * send contexts.  Any extra send contexts are added from the last VL
 * on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_send_contexts are non-power of 2, the
 * array sizes in the struct pio_vl_map and the struct pio_map_elem are
 * rounded up to the next highest power of 2 and the first entry is
 * reused in a round robin fashion.
 *
 * If an error occurs the map change is not done and the mapping is not
 * changed.
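 *
 * For example (hypothetical numbers): 16 kernel send contexts spread over
 * 5 VLs gives sc_per_vl = 3 with 1 extra, so VL4 gets 4 contexts and
 * VLs 0-3 get 3 each.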
 *
 */
int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
{
	int i, j;
	int extra, sc_per_vl;
	int scontext = 1;
	int num_kernel_send_contexts = 0;
	u8 lvl_scontexts[OPA_MAX_VLS];
	struct pio_vl_map *oldmap, *newmap;

	if (!vl_scontexts) {
		for (i = 0; i < dd->num_send_contexts; i++)
			if (dd->send_contexts[i].type == SC_KERNEL)
				num_kernel_send_contexts++;
		/* truncate divide */
		sc_per_vl = num_kernel_send_contexts / num_vls;
		/* extras */
		extra = num_kernel_send_contexts % num_vls;
		vl_scontexts = lvl_scontexts;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
	}
	/* build new map */
	newmap = kzalloc(sizeof(*newmap) +
			 roundup_pow_of_two(num_vls) *
			 sizeof(struct pio_map_elem *),
			 GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_scontext = scontext;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_scontexts[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
						 sz * sizeof(struct
							     send_context *),
						 GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/*
			 * assign send contexts and
			 * adjust credit return threshold
			 */
			for (j = 0; j < sz; j++) {
				if (dd->kernel_send_context[scontext]) {
					newmap->map[i]->ksc[j] =
					dd->kernel_send_context[scontext];
					set_threshold(dd, scontext, i);
				}
				if (++scontext >= first_scontext +
						  vl_scontexts[i])
					/* wrap back to first send context */
					scontext = first_scontext;
			}
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		scontext = first_scontext + vl_scontexts[i];
	}
	/* newmap in hand, save old map */
	spin_lock_irq(&dd->pio_map_lock);
	oldmap = rcu_dereference_protected(dd->pio_map,
					   lockdep_is_held(&dd->pio_map_lock));

	/* publish newmap */
	rcu_assign_pointer(dd->pio_map, newmap);

	spin_unlock_irq(&dd->pio_map_lock);
	/* success, free any old map after grace period */
	if (oldmap)
		call_rcu(&oldmap->list, pio_map_rcu_callback);
	return 0;
bail:
	/* free any partial allocation */
	pio_map_free(newmap);
	return -ENOMEM;
}

void free_pio_map(struct hfi1_devdata *dd)
{
	/* Free PIO map if allocated */
	if (rcu_access_pointer(dd->pio_map)) {
		spin_lock_irq(&dd->pio_map_lock);
		pio_map_free(rcu_access_pointer(dd->pio_map));
		RCU_INIT_POINTER(dd->pio_map, NULL);
		spin_unlock_irq(&dd->pio_map_lock);
		synchronize_rcu();
	}
	kfree(dd->kernel_send_context);
	dd->kernel_send_context = NULL;
}

int init_pervl_scs(struct hfi1_devdata *dd)
{
	int i;
	u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
	u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
	u32 ctxt;
	struct hfi1_pportdata *ppd = dd->pport;

	dd->vld[15].sc = sc_alloc(dd, SC_VL15,
				  dd->rcd[0]->rcvhdrqentsize, dd->node);
	if (!dd->vld[15].sc)
		return -ENOMEM;

	hfi1_init_ctxt(dd->vld[15].sc);
	dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);

	dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
					       sizeof(struct send_context *),
					       GFP_KERNEL, dd->node);
	if (!dd->kernel_send_context)
		goto freesc15;

	dd->kernel_send_context[0] = dd->vld[15].sc;

	for (i = 0; i < num_vls; i++) {
		/*
		 * Since this function does not deal with a specific
		 * receive context but we need the RcvHdrQ entry size,
		 * use the size from rcd[0].  It is guaranteed to be
		 * valid at this point and will remain the same for all
		 * receive contexts.
		 */
		dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
					 dd->rcd[0]->rcvhdrqentsize, dd->node);
		if (!dd->vld[i].sc)
			goto nomem;
		dd->kernel_send_context[i + 1] = dd->vld[i].sc;
		hfi1_init_ctxt(dd->vld[i].sc);
		/* non VL15 start with the max MTU */
		dd->vld[i].mtu = hfi1_max_mtu;
	}
	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
		dd->kernel_send_context[i + 1] =
		sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
		if (!dd->kernel_send_context[i + 1])
			goto nomem;
		hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
	}

	sc_enable(dd->vld[15].sc);
	ctxt = dd->vld[15].sc->hw_context;
	mask = all_vl_mask & ~(1LL << 15);
	write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	dd_dev_info(dd,
		    "Using send context %u(%u) for VL15\n",
		    dd->vld[15].sc->sw_index, ctxt);

	for (i = 0; i < num_vls; i++) {
		sc_enable(dd->vld[i].sc);
		ctxt = dd->vld[i].sc->hw_context;
		mask = all_vl_mask & ~(data_vls_mask);
		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	}
	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
		sc_enable(dd->kernel_send_context[i + 1]);
		ctxt = dd->kernel_send_context[i + 1]->hw_context;
		mask = all_vl_mask & ~(data_vls_mask);
		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	}

	if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
		goto nomem;
	return 0;

nomem:
	for (i = 0; i < num_vls; i++) {
		sc_free(dd->vld[i].sc);
		dd->vld[i].sc = NULL;
	}

	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
		sc_free(dd->kernel_send_context[i + 1]);

	kfree(dd->kernel_send_context);
	dd->kernel_send_context = NULL;

freesc15:
	sc_free(dd->vld[15].sc);
	return -ENOMEM;
}

int init_credit_return(struct hfi1_devdata *dd)
{
	int ret;
	int i;

	dd->cr_base = kcalloc(
		node_affinity.num_possible_nodes,
		sizeof(struct credit_return_base),
		GFP_KERNEL);
	if (!dd->cr_base) {
		ret = -ENOMEM;
		goto done;
	}
	for_each_node_with_cpus(i) {
		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);

		set_dev_node(&dd->pcidev->dev, i);
		dd->cr_base[i].va = dma_zalloc_coherent(
					&dd->pcidev->dev,
					bytes,
					&dd->cr_base[i].dma,
					GFP_KERNEL);
		if (!dd->cr_base[i].va) {
			set_dev_node(&dd->pcidev->dev, dd->node);
			dd_dev_err(dd,
				   "Unable to allocate credit return DMA range for NUMA %d\n",
				   i);
			ret = -ENOMEM;
			goto done;
		}
	}
	set_dev_node(&dd->pcidev->dev, dd->node);

	ret = 0;
done:
	return ret;
}

void free_credit_return(struct hfi1_devdata *dd)
{
	int i;

	if (!dd->cr_base)
		return;
	for (i = 0; i < node_affinity.num_possible_nodes; i++) {
		if (dd->cr_base[i].va) {
			dma_free_coherent(&dd->pcidev->dev,
					  TXE_NUM_CONTEXTS *
					  sizeof(struct credit_return),
					  dd->cr_base[i].va,
					  dd->cr_base[i].dma);
		}
	}
	kfree(dd->cr_base);
	dd->cr_base = NULL;
}