/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */

#define SC(name) SEND_CTXT_##name
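/*
 * For reference, SC(name) token-pastes the CSR prefix: e.g.
 * SC(CREDIT_CTRL) expands to SEND_CTXT_CREDIT_CTRL.
 */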
/*
 * Send Context functions
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause);

/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
	while (1) {
		udelay(1);
		sendctrl = read_csr(dd, SEND_CTRL);
		if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
			break;
	}
}

/* defined in header release 48 and higher */
#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
					<< SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#endif

/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
	u64 reg, mask;
	unsigned long flags;
	int write = 1;	/* write sendctrl back */
	int flush = 0;	/* re-read sendctrl to make sure it is flushed */

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	reg = read_csr(dd, SEND_CTRL);
	switch (op) {
	case PSC_GLOBAL_ENABLE:
		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
		/* Fall through */
	case PSC_DATA_VL_ENABLE:
		/* Disallow sending on VLs not enabled */
		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
		break;
	case PSC_GLOBAL_DISABLE:
		reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_ENABLE:
		reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_DISABLE:
		reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_CM_RESET:
		__cm_reset(dd, reg);
		write = 0; /* CSR already written (and flushed) */
		break;
	case PSC_DATA_VL_DISABLE:
		reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
		flush = 1;
		break;
	default:
		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
		break;
	}

	if (write) {
		write_csr(dd, SEND_CTRL, reg);
		if (flush)
			(void)read_csr(dd, SEND_CTRL); /* flush write */
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/* number of send context memory pools */
#define NUM_SC_POOLS 2

/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL -1
#define SCC_PER_CPU -2
#define SCC_PER_KRCVQ -3

/* Send Context Size (SCS) constants */
#define SCS_ACK_CREDITS 32
#define SCS_VL15_CREDITS 102	/* 3 pkts of 2048B data + 128B header */

#define PIO_THRESHOLD_CEILING 4096

#define PIO_WAIT_BATCH_SIZE 5

/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
	[SC_KERNEL] = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_VL },	/* one per NUMA */
	[SC_ACK]    = { .size  = SCS_ACK_CREDITS,
			.count = SCC_PER_KRCVQ },
	[SC_USER]   = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_CPU },	/* one per CPU */
	[SC_VL15]   = { .size  = SCS_VL15_CREDITS,
			.count = 1 },
};

/* send context memory pool configuration */
struct mem_pool_config {
	int centipercent;	/* % of memory, in 100ths of 1% */
	int absolute_blocks;	/* absolute block count */
};

/* default memory pool configuration: 100% in pool 0 */
static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
	/* centi%, abs blocks */
	{  10000,     -1 },	/* pool 0 */
	{      0,     -1 },	/* pool 1 */
};
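/*
 * Example: { 10000, -1 } places 100.00% of the pooled blocks in
 * pool 0.  A (hypothetical) 75%/25% split would be expressed as
 * { 7500, -1 } for pool 0 and { 2500, -1 } for pool 1.
 */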
/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
	int centipercent;	/*
				 * 100th of 1% of memory to use, -1 if blocks
				 * already set
				 */
	int count;		/* count of contexts in the pool */
	int blocks;		/* block size of the pool */
	int size;		/* context size, in blocks */
};

/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and increase negatively.  Map them as:
 *	-1 => 0
 *	-2 => 1
 *	etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{
	if (wc >= 0)
		return -1;	/* non-wildcard */
	return -wc - 1;
}

static const char *sc_type_names[SC_MAX] = {
	"kernel",
	"ack",
	"user",
	"vl15"
};

static const char *sc_type_name(int index)
{
	if (index < 0 || index >= SC_MAX)
		return "unknown";
	return sc_type_names[index];
}

/*
 * Read the send context memory pool configuration and send context
 * size configuration.  Replace any wildcards and come up with final
 * counts and sizes for the send context types.
 */
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
	struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
	int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
	int total_contexts = 0;
	int fixed_blocks;
	int pool_blocks;
	int used_blocks;
	int cp_total;		/* centipercent total */
	int ab_total;		/* absolute block total */
	int extra;
	int i;

	/*
	 * When SDMA is enabled, kernel context pio packet size is capped by
	 * "piothreshold".  Reduce pio buffer allocation for kernel context by
	 * setting it to a fixed size.  The allocation allows 3-deep buffering
	 * of the largest pio packets plus up to 128 bytes header, sufficient
	 * to maintain verbs performance.
	 *
	 * When SDMA is disabled, keep the default pooling allocation.
	 */
	if (HFI1_CAP_IS_KSET(SDMA)) {
		u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
					piothreshold : PIO_THRESHOLD_CEILING;
		sc_config_sizes[SC_KERNEL].size =
			3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
	}

	/*
	 * Step 1:
	 * - copy the centipercents/absolute sizes from the pool config
	 * - sanity check these values
	 * - add up centipercents, then later check for full value
	 * - add up absolute blocks, then later check for over-commit
	 */
	cp_total = 0;
	ab_total = 0;
	for (i = 0; i < NUM_SC_POOLS; i++) {
		int cp = sc_mem_pool_config[i].centipercent;
		int ab = sc_mem_pool_config[i].absolute_blocks;

		/*
		 * A negative value is "unused" or "invalid".  Both *can*
		 * be valid, but centipercent wins, so check that first
		 */
		if (cp >= 0) {			/* centipercent valid */
			cp_total += cp;
		} else if (ab >= 0) {		/* absolute blocks valid */
			ab_total += ab;
		} else {			/* neither valid */
			dd_dev_err(
				dd,
				"Send context memory pool %d: both the block count and centipercent are invalid\n",
				i);
			return -EINVAL;
		}

		mem_pool_info[i].centipercent = cp;
		mem_pool_info[i].blocks = ab;
	}

	/* do not use both % and absolute blocks for different pools */
	if (cp_total != 0 && ab_total != 0) {
		dd_dev_err(
			dd,
			"All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
		return -EINVAL;
	}

	/* if any percentages are present, they must add up to 100% x 100 */
	if (cp_total != 0 && cp_total != 10000) {
		dd_dev_err(
			dd,
			"Send context memory pool centipercent is %d, expecting 10000\n",
			cp_total);
		return -EINVAL;
	}

	/* the absolute pool total cannot be more than the mem total */
	if (ab_total > total_blocks) {
		dd_dev_err(
			dd,
			"Send context memory pool absolute block count %d is larger than the memory size %d\n",
			ab_total, total_blocks);
		return -EINVAL;
	}

	/*
	 * Step 2:
	 * - copy from the context size config
	 * - replace context type wildcard counts with real values
	 * - add up non-memory pool block sizes
	 * - add up memory pool user counts
	 */
	fixed_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		int count = sc_config_sizes[i].count;
		int size = sc_config_sizes[i].size;
		int pool;

		/*
		 * Sanity check count: Either a positive value or
		 * one of the expected wildcards is valid.  The positive
		 * value is checked later when we compare against total
		 * memory available.
		 */
		if (i == SC_ACK) {
			count = dd->n_krcv_queues;
		} else if (i == SC_KERNEL) {
			count = INIT_SC_PER_VL * num_vls;
		} else if (count == SCC_PER_CPU) {
			count = dd->num_rcv_contexts - dd->n_krcv_queues;
		} else if (count < 0) {
			dd_dev_err(
				dd,
				"%s send context invalid count wildcard %d\n",
				sc_type_name(i), count);
			return -EINVAL;
		}
		if (total_contexts + count > dd->chip_send_contexts)
			count = dd->chip_send_contexts - total_contexts;

		total_contexts += count;

		/*
		 * Sanity check pool: The conversion will return a pool
		 * number or -1 if a fixed (non-negative) value.  The fixed
		 * value is checked later when we compare against
		 * total memory available.
		 */
		pool = wildcard_to_pool(size);
		if (pool == -1) {			/* non-wildcard */
			fixed_blocks += size * count;
		} else if (pool < NUM_SC_POOLS) {	/* valid wildcard */
			mem_pool_info[pool].count += count;
		} else {				/* invalid wildcard */
			dd_dev_err(
				dd,
				"%s send context invalid pool wildcard %d\n",
				sc_type_name(i), size);
			return -EINVAL;
		}

		dd->sc_sizes[i].count = count;
		dd->sc_sizes[i].size = size;
	}
	if (fixed_blocks > total_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed block count, %u, larger than total block count %u\n",
			fixed_blocks, total_blocks);
		return -EINVAL;
	}

	/* Step 3: calculate the blocks in the pools, and pool context sizes */
	pool_blocks = total_blocks - fixed_blocks;
	if (ab_total > pool_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed pool sizes, %u, larger than pool block count %u\n",
			ab_total, pool_blocks);
		return -EINVAL;
	}
	/* subtract off the fixed pool blocks */
	pool_blocks -= ab_total;

	for (i = 0; i < NUM_SC_POOLS; i++) {
		struct mem_pool_info *pi = &mem_pool_info[i];

		/* % beats absolute blocks */
		if (pi->centipercent >= 0)
			pi->blocks = (pool_blocks * pi->centipercent) / 10000;

		if (pi->blocks == 0 && pi->count != 0) {
			dd_dev_err(
				dd,
				"Send context memory pool %d has %u contexts, but no blocks\n",
				i, pi->count);
			return -EINVAL;
		}
		if (pi->count == 0) {
			/* warn about wasted blocks */
			if (pi->blocks != 0)
				dd_dev_err(
					dd,
					"Send context memory pool %d has %u blocks, but zero contexts\n",
					i, pi->blocks);
			pi->size = 0;
		} else {
			pi->size = pi->blocks / pi->count;
		}
	}

	/* Step 4: fill in the context type sizes from the pool sizes */
	used_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		if (dd->sc_sizes[i].size < 0) {
			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);

			WARN_ON_ONCE(pool >= NUM_SC_POOLS);
			dd->sc_sizes[i].size = mem_pool_info[pool].size;
		}
		/* make sure we are not larger than what is allowed by the HW */
#define PIO_MAX_BLOCKS 1024
		if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
			dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

		/* calculate our total usage */
		used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
	}
	extra = total_blocks - used_blocks;
	if (extra != 0)
		dd_dev_info(dd, "unused send context blocks: %d\n", extra);

	return total_contexts;
}
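/*
 * Worked example with hypothetical numbers: given total_blocks = 2048
 * and fixed_blocks = 166 (e.g. two 32-block ACK contexts plus one
 * 102-block VL15 context), pool_blocks = 1882.  With 100% in pool 0
 * and 20 contexts drawing from it, each pool-0 context gets
 * 1882 / 20 = 94 blocks.
 */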
int init_send_contexts(struct hfi1_devdata *dd)
{
	u16 base;
	int ret, i, j, context;

	ret = init_credit_return(dd);
	if (ret)
		return ret;

	dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
				     GFP_KERNEL);
	dd->send_contexts = kcalloc(dd->num_send_contexts,
				    sizeof(struct send_context_info),
				    GFP_KERNEL);
	if (!dd->send_contexts || !dd->hw_to_sw) {
		kfree(dd->hw_to_sw);
		kfree(dd->send_contexts);
		free_credit_return(dd);
		return -ENOMEM;
	}

	/* hardware context map starts with invalid send context indices */
	for (i = 0; i < TXE_NUM_CONTEXTS; i++)
		dd->hw_to_sw[i] = INVALID_SCI;

	/*
	 * All send contexts have their credit sizes.  Allocate credits
	 * for each context one after another from the global space.
	 */
	context = 0;
	base = 1;
	for (i = 0; i < SC_MAX; i++) {
		struct sc_config_sizes *scs = &dd->sc_sizes[i];

		for (j = 0; j < scs->count; j++) {
			struct send_context_info *sci =
						&dd->send_contexts[context];
			sci->type = i;
			sci->base = base;
			sci->credits = scs->size;

			context++;
			base += scs->size;
		}
	}

	return 0;
}

/*
 * Allocate a software index and hardware context of the given type.
 *
 * Must be called with dd->sc_lock held.
 */
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
		       u32 *hw_context)
{
	struct send_context_info *sci;
	u32 index;
	u32 context;

	for (index = 0, sci = &dd->send_contexts[0];
	     index < dd->num_send_contexts; index++, sci++) {
		if (sci->type == type && sci->allocated == 0) {
			sci->allocated = 1;
			/* use a 1:1 mapping, but make them non-equal */
			context = dd->chip_send_contexts - index - 1;
			dd->hw_to_sw[context] = index;
			*sw_index = index;
			*hw_context = context;
			return 0; /* success */
		}
	}
	dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
	return -ENOSPC;
}

/*
 * Free the send context given by its software index.
 *
 * Must be called with dd->sc_lock held.
 */
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{
	struct send_context_info *sci;

	sci = &dd->send_contexts[sw_index];
	if (!sci->allocated) {
		dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
			   __func__, sw_index, hw_context);
	}
	sci->allocated = 0;
	dd->hw_to_sw[hw_context] = INVALID_SCI;
}

/* return the base context of a context in a group */
static inline u32 group_context(u32 context, u32 group)
{
	return (context >> group) << group;
}

/* return the size of a group */
static inline u32 group_size(u32 group)
{
	return 1 << group;
}
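/*
 * Example: with group = 2 a group spans group_size(2) = 4 contexts,
 * so hw contexts 8..11 all resolve to group_context(ctxt, 2) = 8 and
 * share the credit return address programmed for context 8.
 */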
/*
 * Obtain the credit return addresses, kernel virtual and bus, for the
 * given sc.
 *
 * To understand this routine:
 * o va and dma are arrays of struct credit_return.  One for each physical
 *   send context, per NUMA.
 * o Each send context always looks in its relative location in a struct
 *   credit_return for its credit return.
 * o Each send context in a group must have its return address CSR programmed
 *   with the same value.  Use the address of the first send context in the
 *   group.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
	u32 gc = group_context(sc->hw_context, sc->group);
	u32 index = sc->hw_context & 0x7;

	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
	*dma = (unsigned long)
	       &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}

/*
 * Work queue function triggered in error interrupt routine for
 * kernel contexts.
 */
static void sc_halted(struct work_struct *work)
{
	struct send_context *sc;

	sc = container_of(work, struct send_context, halt_work);
	sc_restart(sc);
}

/*
 * Calculate PIO block threshold for this send context using the given MTU.
 * Trigger a return when one MTU plus optional header of credits remain.
 *
 * Parameter mtu is in bytes.
 * Parameter hdrqentsize is in DWORDs.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
	u32 release_credits;
	u32 threshold;

	/* add in the header size, then divide by the PIO block size */
	mtu += hdrqentsize << 2;
	release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

	/* check against this context's credits */
	if (sc->credits <= release_credits)
		threshold = 1;
	else
		threshold = sc->credits - release_credits;

	return threshold;
}

/*
 * Calculate credit threshold in terms of percent of the allocated credits.
 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
	return (sc->credits * percent) / 100;
}
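/*
 * Worked example for sc_mtu_to_threshold(), with hypothetical sizes
 * and assuming a 64-byte PIO block: mtu = 8192 bytes and
 * hdrqentsize = 32 DWORDs (128 bytes) give 8320 bytes, which is
 * DIV_ROUND_UP(8320, 64) = 130 release credits.  A context with
 * 160 credits then gets a threshold of 160 - 130 = 30.
 */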
/*
 * Set the credit return threshold.
 */
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
	unsigned long flags;
	u32 old_threshold;
	int force_return = 0;

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

	old_threshold = (sc->credit_ctrl >>
				SC(CREDIT_CTRL_THRESHOLD_SHIFT))
			 & SC(CREDIT_CTRL_THRESHOLD_MASK);

	if (new_threshold != old_threshold) {
		sc->credit_ctrl =
			(sc->credit_ctrl
				& ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
			| ((new_threshold
				& SC(CREDIT_CTRL_THRESHOLD_MASK))
			   << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);

		/* force a credit return on change to avoid a possible stall */
		force_return = 1;
	}

	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

	if (force_return)
		sc_return_credits(sc);
}

/*
 * set_pio_integrity
 *
 * Set the CHECK_ENABLE register for the send context 'sc'.
 */
void set_pio_integrity(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u32 hw_context = sc->hw_context;
	int type = sc->type;

	write_kctxt_csr(dd, hw_context,
			SC(CHECK_ENABLE),
			hfi1_pkt_default_send_ctxt_mask(dd, type));
}

static u32 get_buffers_allocated(struct send_context *sc)
{
	int cpu;
	u32 ret = 0;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
	return ret;
}

static void reset_buffers_allocated(struct send_context *sc)
{
	int cpu;

	for_each_possible_cpu(cpu)
		(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}

/*
 * Allocate a NUMA relative send context structure of the given type along
 * with a HW context.
 */
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
			      uint hdrqentsize, int numa)
{
	struct send_context_info *sci;
	struct send_context *sc = NULL;
	dma_addr_t dma;
	unsigned long flags;
	u64 reg;
	u32 thresh;
	u32 sw_index;
	u32 hw_context;
	int ret;
	u8 opval, opmask;

	/* do not allocate while frozen */
	if (dd->flags & HFI1_FROZEN)
		return NULL;

	sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
	if (!sc)
		return NULL;

	sc->buffers_allocated = alloc_percpu(u32);
	if (!sc->buffers_allocated) {
		kfree(sc);
		dd_dev_err(dd,
			   "Cannot allocate buffers_allocated per cpu counters\n");
		return NULL;
	}

	spin_lock_irqsave(&dd->sc_lock, flags);
	ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
	if (ret) {
		spin_unlock_irqrestore(&dd->sc_lock, flags);
		free_percpu(sc->buffers_allocated);
		kfree(sc);
		return NULL;
	}

	sci = &dd->send_contexts[sw_index];
	sci->sc = sc;

	sc->dd = dd;
	sc->node = numa;
	sc->type = type;
	spin_lock_init(&sc->alloc_lock);
	spin_lock_init(&sc->release_lock);
	spin_lock_init(&sc->credit_ctrl_lock);
	INIT_LIST_HEAD(&sc->piowait);
	INIT_WORK(&sc->halt_work, sc_halted);
	init_waitqueue_head(&sc->halt_wait);

	/* grouping is always single context for now */
	sc->group = 0;

	sc->sw_index = sw_index;
	sc->hw_context = hw_context;
	cr_group_addresses(sc, &dma);
	sc->credits = sci->credits;
	sc->size = sc->credits * PIO_BLOCK_SIZE;

/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16
	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
					<< PIO_ADDR_CONTEXT_SHIFT);

	/* set base and credits */
	reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
					<< SC(CTRL_CTXT_DEPTH_SHIFT))
		| ((sci->base & SC(CTRL_CTXT_BASE_MASK))
					<< SC(CTRL_CTXT_BASE_SHIFT));
	write_kctxt_csr(dd, hw_context, SC(CTRL), reg);

	set_pio_integrity(sc);

	/* unmask all errors */
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);

	/* set the default partition key */
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
			(SC(CHECK_PARTITION_KEY_VALUE_MASK) &
			 DEFAULT_PKEY) <<
			SC(CHECK_PARTITION_KEY_VALUE_SHIFT));

	/* per context type checks */
	if (type == SC_USER) {
		opval = USER_OPCODE_CHECK_VAL;
		opmask = USER_OPCODE_CHECK_MASK;
	} else {
		opval = OPCODE_CHECK_VAL_DISABLED;
		opmask = OPCODE_CHECK_MASK_DISABLED;
	}

	/* set the send context check opcode mask and value */
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
			((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
			((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));

	/* set up credit return */
	reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);

	/*
	 * Calculate the initial credit return threshold.
	 *
	 * For Ack contexts, set a threshold for half the credits.
	 * For User contexts use the given percentage.  This has been
	 * sanitized on driver start-up.
	 * For Kernel contexts, use the default MTU plus a header
	 * or half the credits, whichever is smaller.  This should
	 * work for both the 3-deep buffering allocation and the
	 * pooling allocation.
	 */
	if (type == SC_ACK) {
		thresh = sc_percent_to_threshold(sc, 50);
	} else if (type == SC_USER) {
		thresh = sc_percent_to_threshold(sc,
						 user_credit_return_threshold);
	} else { /* kernel */
		thresh = min(sc_percent_to_threshold(sc, 50),
			     sc_mtu_to_threshold(sc, hfi1_max_mtu,
						 hdrqentsize));
	}
	reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
	/* add in early return */
	if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
	else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);

	/* set up write-through credit_ctrl */
	sc->credit_ctrl = reg;
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);

	/* User send contexts should not allow sending on VL15 */
	if (type == SC_USER) {
		reg = 1ULL << 15;
		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
	}

	spin_unlock_irqrestore(&dd->sc_lock, flags);

	/*
	 * Allocate shadow ring to track outstanding PIO buffers _after_
	 * unlocking.  We don't know the size until the lock is held and
	 * we can't allocate while the lock is held.  No one is using
	 * the context yet, so allocate it now.
	 *
	 * User contexts do not get a shadow ring.
	 */
	if (type != SC_USER) {
		/*
		 * Size the shadow ring 1 larger than the number of credits
		 * so head == tail can mean empty.
		 */
		sc->sr_size = sci->credits + 1;
		sc->sr = kcalloc_node(sc->sr_size,
				      sizeof(union pio_shadow_ring),
				      GFP_KERNEL, numa);
		if (!sc->sr) {
			sc_free(sc);
			return NULL;
		}
	}

	hfi1_cdbg(PIO,
		  "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
		  sw_index,
		  hw_context,
		  sc_type_name(type),
		  sc->group,
		  sc->credits,
		  sc->credit_ctrl,
		  thresh);

	return sc;
}

/* free a per-NUMA send context structure */
void sc_free(struct send_context *sc)
{
	struct hfi1_devdata *dd;
	unsigned long flags;
	u32 sw_index;
	u32 hw_context;

	if (!sc)
		return;

	sc->flags |= SCF_IN_FREE;	/* ensure no restarts */
	dd = sc->dd;
	if (!list_empty(&sc->piowait))
		dd_dev_err(dd, "piowait list not empty!\n");
	sw_index = sc->sw_index;
	hw_context = sc->hw_context;
	sc_disable(sc);	/* make sure the HW is disabled */
	flush_work(&sc->halt_work);

	spin_lock_irqsave(&dd->sc_lock, flags);
	dd->send_contexts[sw_index].sc = NULL;

	/* clear/disable all registers set in sc_alloc */
	write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);

	/* release the index and context for re-use */
	sc_hw_free(dd, sw_index, hw_context);
	spin_unlock_irqrestore(&dd->sc_lock, flags);

	kfree(sc->sr);
	free_percpu(sc->buffers_allocated);
	kfree(sc);
}
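/*
 * Typical lifecycle, for reference: sc_alloc() -> sc_enable() -> PIO
 * traffic -> sc_disable() -> sc_free().  Note that sc_free() calls
 * sc_disable() itself, so a context being torn down does not need an
 * explicit disable first.
 */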
/* disable the context */
void sc_disable(struct send_context *sc)
{
	u64 reg;
	unsigned long flags;
	struct pio_buf *pbuf;

	if (!sc)
		return;

	/* do all steps, even if already disabled */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
	reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
	sc->flags &= ~SCF_ENABLED;
	sc_wait_for_packet_egress(sc, 1);
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	/*
	 * Flush any waiters.  Once the context is disabled,
	 * credit return interrupts are stopped (although there
	 * could be one in-process when the context is disabled).
	 * Wait one microsecond for any lingering interrupts, then
	 * proceed with the flush.
	 */
	udelay(1);
	spin_lock_irqsave(&sc->release_lock, flags);
	if (sc->sr) {	/* this context has a shadow ring */
		while (sc->sr_tail != sc->sr_head) {
			pbuf = &sc->sr[sc->sr_tail].pbuf;
			if (pbuf->cb)
				(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
			sc->sr_tail++;
			if (sc->sr_tail >= sc->sr_size)
				sc->sr_tail = 0;
		}
	}
	spin_unlock_irqrestore(&sc->release_lock, flags);
}

/* return SendEgressCtxtStatus.PacketOccupancy */
#define packet_occupancy(r) \
	(((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
	>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)

/* is egress halted on the context? */
#define egress_halted(r) \
	((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)

/* wait for packet egress, optionally pause for credit return */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg = 0;
	u64 reg_prev;
	u32 loop = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, sc->hw_context * 8 +
			       SEND_EGRESS_CTXT_STATUS);
		/* done if egress is stopped */
		if (egress_halted(reg))
			break;
		reg = packet_occupancy(reg);
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			loop = 0;
		if (loop > 50000) {
			/* timed out - bounce the link */
			dd_dev_err(dd,
				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sc->sw_index,
				   sc->hw_context, (u32)reg);
			queue_work(dd->pport->link_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		loop++;
		udelay(1);
	}

	if (pause)
		/* Add additional delay to ensure chip returns all credits */
		pause_for_credit_return(dd);
}

void sc_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		struct send_context *sc = dd->send_contexts[i].sc;

		if (!sc)
			continue;
		sc_wait_for_packet_egress(sc, 0);
	}
}
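/*
 * Note on sc_wait_for_packet_egress(): it polls in 1 usec steps and
 * gives up after 50000 iterations with an unchanged occupancy count,
 * i.e. roughly 50 ms before the link is bounced.
 */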
/*
 * Restart a context after it has been halted due to error.
 *
 * If the first step (waiting for the halt to be asserted) fails,
 * return early.  Otherwise complain about timeouts but keep going.
 *
 * It is expected that allocations (enabled flag bit) have been shut off
 * already (only applies to kernel contexts).
 */
int sc_restart(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg;
	u32 loop;
	int count;

	/* bounce off if not halted, or being free'd */
	if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
		return -EINVAL;

	dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
		    sc->hw_context);

	/*
	 * Step 1: Wait for the context to actually halt.
	 *
	 * The error interrupt is asynchronous to actually setting halt
	 * on the context.
	 */
	loop = 0;
	while (1) {
		reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
		if (reg & SC(STATUS_CTXT_HALTED_SMASK))
			break;
		if (loop > 100) {
			dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
				   __func__, sc->sw_index, sc->hw_context);
			return -ETIME;
		}
		loop++;
		udelay(1);
	}

	/*
	 * Step 2: Ensure no users are still trying to write to PIO.
	 *
	 * For kernel contexts, we have already turned off buffer allocation.
	 * Now wait for the buffer count to go to zero.
	 *
	 * For user contexts, the user handling code has cut off write access
	 * to the context's PIO pages before calling this routine and will
	 * restore write access after this routine returns.
	 */
	if (sc->type != SC_USER) {
		/* kernel context */
		loop = 0;
		while (1) {
			count = get_buffers_allocated(sc);
			if (count == 0)
				break;
			if (loop > 100) {
				dd_dev_err(dd,
					   "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
					   __func__, sc->sw_index,
					   sc->hw_context, count);
				break;
			}
			loop++;
			udelay(1);
		}
	}

	/*
	 * Step 3: Wait for all packets to egress.
	 * This is done while disabling the send context.
	 *
	 * Step 4: Disable the context.
	 *
	 * This is a superset of the halt.  After the disable, the
	 * errors can be cleared.
	 */
	sc_disable(sc);

	/*
	 * Step 5: Enable the context.
	 *
	 * This enable will clear the halted flag and per-send context
	 * error flags.
	 */
	return sc_enable(sc);
}

/*
 * PIO freeze processing.  To be called after the TXE block is fully frozen.
 * Go through all frozen send contexts and disable them.  The contexts are
 * already stopped by the freeze.
 */
void pio_freeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		/*
		 * Don't disable unallocated, unfrozen, or user send contexts.
		 * User send contexts will be disabled when the process
		 * calls into the driver to reset its context.
		 */
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		/* only need to disable, the context is already stopped */
		sc_disable(sc);
	}
}

/*
 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
 * is that all PIO send contexts have been disabled and the SPC freeze has
 * been cleared.  Now perform the last step and re-enable each kernel context.
 * User (PSM) processing will occur when PSM calls into the kernel to
 * acknowledge the freeze.
 */
void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		sc_enable(sc);	/* will clear the sc frozen flag */
	}
}

/*
 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
 * Returns:
 *	-ETIMEDOUT - if we wait too long
 *	-EIO	   - if there was an error
 */
static int pio_init_wait_progress(struct hfi1_devdata *dd)
{
	u64 reg;
	int max, count = 0;

	/* max is the longest possible HW init time / delay */
	max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
	while (1) {
		reg = read_csr(dd, SEND_PIO_INIT_CTXT);
		if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
			break;
		if (count >= max)
			return -ETIMEDOUT;
		udelay(5);
		count++;
	}

	return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
}

/*
 * Reset all of the send contexts to their power-on state.  Used
 * only during manual init - no lock against sc_enable needed.
 */
void pio_reset_all(struct hfi1_devdata *dd)
{
	int ret;

	/* make sure the init engine is not busy */
	ret = pio_init_wait_progress(dd);
	/* ignore any timeout */
	if (ret == -EIO) {
		/* clear the error */
		write_csr(dd, SEND_PIO_ERR_CLEAR,
			  SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
	}

	/* reset init all */
	write_csr(dd, SEND_PIO_INIT_CTXT,
		  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
	udelay(2);
	ret = pio_init_wait_progress(dd);
	if (ret < 0) {
		dd_dev_err(dd,
			   "PIO send context init %s while initializing all PIO blocks\n",
			   ret == -ETIMEDOUT ? "is stuck" : "had an error");
	}
}

/* enable the context */
int sc_enable(struct send_context *sc)
{
	u64 sc_ctrl, reg, pio;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int ret = 0;

	if (!sc)
		return -EINVAL;
	dd = sc->dd;

	/*
	 * Obtain the allocator lock to guard against any allocation
	 * attempts (which should not happen prior to context being
	 * enabled).  On the release/disable side we don't need to
	 * worry about locking since the releaser will not do anything
	 * if the context accounting values have not changed.
	 */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
		goto unlock; /* already enabled */

	/* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */

	*sc->hw_free = 0;
	sc->free = 0;
	sc->alloc_free = 0;
	sc->fill = 0;
	sc->fill_wrap = 0;
	sc->sr_head = 0;
	sc->sr_tail = 0;
	sc->flags = 0;
	/* the alloc lock ensures no fast path allocation */
	reset_buffers_allocated(sc);

	/*
	 * Clear all per-context errors.  Some of these will be set when
	 * we are re-enabling after a context halt.  Now that the context
	 * is disabled, the halt will not clear until after the PIO init
	 * engine runs below.
	 */
	reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
	if (reg)
		write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);

	/*
	 * The HW PIO initialization engine can handle only one init
	 * request at a time.  Serialize access to each device's engine.
	 */
	spin_lock(&dd->sc_init_lock);
	/*
	 * Since access to this code block is serialized and
	 * each access waits for the initialization to complete
	 * before releasing the lock, the PIO initialization engine
	 * should not be in use, so we don't have to wait for the
	 * InProgress bit to go down.
	 */
	pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
	       SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
		SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
	write_csr(dd, SEND_PIO_INIT_CTXT, pio);
	/*
	 * Wait until the engine is done.  Give the chip the required time
	 * so, hopefully, we read the register just once.
	 */
	udelay(2);
	ret = pio_init_wait_progress(dd);
	spin_unlock(&dd->sc_init_lock);
	if (ret) {
		dd_dev_err(dd,
			   "sctxt%u(%u): Context not enabled due to init failure %d\n",
			   sc->sw_index, sc->hw_context, ret);
		goto unlock;
	}

	/*
	 * All is well.  Enable the context.
	 */
	sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
	write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
	/*
	 * Read SendCtxtCtrl to force the write out and prevent a timing
	 * hazard where a PIO write may reach the context before the enable.
	 */
	read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	sc->flags |= SCF_ENABLED;

unlock:
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	return ret;
}

/* force a credit return on the context */
void sc_return_credits(struct send_context *sc)
{
	if (!sc)
		return;

	/* a 0->1 transition schedules a credit return */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
			SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
	/*
	 * Ensure that the write is flushed and the credit return is
	 * scheduled.  We care more about the 0 -> 1 transition.
	 */
	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
	/* set back to 0 for next time */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}

/* allow all in-flight packets to drain on the context */
void sc_flush(struct send_context *sc)
{
	if (!sc)
		return;

	sc_wait_for_packet_egress(sc, 1);
}

/* drop all packets on the context, no waiting until they are sent */
void sc_drop(struct send_context *sc)
{
	if (!sc)
		return;

	dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
		    __func__, sc->sw_index, sc->hw_context);
}

/*
 * Start the software reaction to a context halt or SPC freeze:
 *	- mark the context as halted or frozen
 *	- stop buffer allocations
 *
 * Called from the error interrupt.  Other work is deferred until
 * out of the interrupt.
 */
void sc_stop(struct send_context *sc, int flag)
{
	unsigned long flags;

	/* mark the context */
	sc->flags |= flag;

	/* stop buffer allocations */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc->flags &= ~SCF_ENABLED;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);
	wake_up(&sc->halt_wait);
}

#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
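/*
 * Example, assuming a 64-byte PIO block (BLOCK_DWORDS = 16): a
 * 96-DWORD packet needs dwords_to_blocks(96) = DIV_ROUND_UP(96, 16)
 * = 6 blocks, i.e. 6 credits.
 */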
/*
 * The send context buffer "allocator".
 *
 * @sc: the PIO send context we are allocating from
 * @dw_len: length of whole packet - including PBC - in dwords
 * @cb: optional callback to call when the buffer is finished sending
 * @arg: argument for cb
 *
 * Return a pointer to a PIO buffer if successful, NULL if not enough room.
 */
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
				pio_release_cb cb, void *arg)
{
	struct pio_buf *pbuf = NULL;
	unsigned long flags;
	unsigned long avail;
	unsigned long blocks = dwords_to_blocks(dw_len);
	u32 fill_wrap;
	int trycount = 0;
	u32 head, next;

	spin_lock_irqsave(&sc->alloc_lock, flags);
	if (!(sc->flags & SCF_ENABLED)) {
		spin_unlock_irqrestore(&sc->alloc_lock, flags);
		goto done;
	}

retry:
	avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
	if (blocks > avail) {
		/* not enough room */
		if (unlikely(trycount)) { /* already tried to get more room */
			spin_unlock_irqrestore(&sc->alloc_lock, flags);
			goto done;
		}
		/* copy from receiver cache line and recalculate */
		sc->alloc_free = READ_ONCE(sc->free);
		avail =
			(unsigned long)sc->credits -
			(sc->fill - sc->alloc_free);
		if (blocks > avail) {
			/* still no room, actively update */
			sc_release_update(sc);
			sc->alloc_free = READ_ONCE(sc->free);
			trycount++;
			goto retry;
		}
	}

	/* there is enough room */

	preempt_disable();
	this_cpu_inc(*sc->buffers_allocated);

	/* read this once */
	head = sc->sr_head;

	/* "allocate" the buffer */
	sc->fill += blocks;
	fill_wrap = sc->fill_wrap;
	sc->fill_wrap += blocks;
	if (sc->fill_wrap >= sc->credits)
		sc->fill_wrap = sc->fill_wrap - sc->credits;

	/*
	 * Fill the parts that the releaser looks at before moving the head.
	 * The only necessary piece is the sent_at field.  The credits
	 * we have just allocated cannot have been returned yet, so the
	 * cb and arg will not be looked at for a "while".  Put them
	 * on this side of the memory barrier anyway.
	 */
	pbuf = &sc->sr[head].pbuf;
	pbuf->sent_at = sc->fill;
	pbuf->cb = cb;
	pbuf->arg = arg;
	pbuf->sc = sc;	/* could be filled in at sc->sr init time */
	/* make sure this is in memory before updating the head */

	/* calculate next head index, do not store */
	next = head + 1;
	if (next >= sc->sr_size)
		next = 0;
	/*
	 * update the head - must be last! - the releaser can look at fields
	 * in pbuf once we move the head
	 */
	smp_wmb();
	sc->sr_head = next;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	/* finish filling in the buffer outside the lock */
	pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
	pbuf->end = sc->base_addr + sc->size;
	pbuf->qw_written = 0;
	pbuf->carry_bytes = 0;
	pbuf->carry.val64 = 0;
done:
	return pbuf;
}
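/*
 * A sketch of the expected caller pattern (names are illustrative,
 * not from this file):
 *
 *	pbuf = sc_buffer_alloc(sc, dw_len, done_cb, done_arg);
 *	if (!pbuf)
 *		// no credits: queue on sc->piowait and turn on the
 *		// credit return interrupt via hfi1_sc_wantpiobuf_intr()
 *	else
 *		// copy PBC + payload starting at pbuf->start;
 *		// done_cb(done_arg, code) runs on credit return
 */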
/*
 * There are at least two entities that can turn on credit return
 * interrupts and they can overlap.  Avoid problems by implementing
 * a count scheme that is enforced by a lock.  The lock is needed because
 * the count and CSR write must be paired.
 */

/*
 * Start credit return interrupts.  This is managed by a count.  If already
 * on, just increment the count.
 */
void sc_add_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	/* lock must surround both the count change and the CSR update */
	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	sc->credit_intr_count++;
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

/*
 * Stop credit return interrupts.  This is managed by a count.  Decrement the
 * count, if the last user, then turn the credit interrupts off.
 */
void sc_del_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	WARN_ON(sc->credit_intr_count == 0);

	/* lock must surround both the count change and the CSR update */
	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	sc->credit_intr_count--;
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

/*
 * The caller must be careful when calling this.  All needint calls
 * must be paired with !needint.
 */
void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
{
	if (needint)
		sc_add_credit_return_intr(sc);
	else
		sc_del_credit_return_intr(sc);
	trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
	if (needint) {
		mmiowb();
		sc_return_credits(sc);
	}
}

/**
 * sc_piobufavail - callback when a PIO buffer is available
 * @sc: the send context
 *
 * This is called from the interrupt handler when a PIO buffer is
 * available after hfi1_verbs_send() returned an error that no buffers were
 * available.  Disable the interrupt if there are no more QPs waiting.
 */
static void sc_piobufavail(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	uint i, n = 0, max_idx = 0;
	u8 max_starved_cnt = 0;

	if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
	    dd->send_contexts[sc->sw_index].type != SC_VL15)
		return;
	list = &sc->piowait;
	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	write_seqlock_irqsave(&dev->iowait_lock, flags);
	while (!list_empty(list)) {
		struct iowait *wait;

		if (n == ARRAY_SIZE(qps))
			break;
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		iowait_starve_find_max(wait, &max_starved_cnt, n, &max_idx);
		/* refcount held until actual wake up */
		qps[n++] = qp;
	}
	/*
	 * If there had been waiters and there are more,
	 * ensure that we redo the force to avoid a potential hang.
	 */
	if (n) {
		hfi1_sc_wantpiobuf_intr(sc, 0);
		if (!list_empty(list))
			hfi1_sc_wantpiobuf_intr(sc, 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	/* Wake up the most starved one first */
	if (n)
		hfi1_qp_wakeup(qps[max_idx],
			       RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
	for (i = 0; i < n; i++)
		if (i != max_idx)
			hfi1_qp_wakeup(qps[i],
				       RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
}

/* translate a send credit update to a bit code of reasons */
static inline int fill_code(u64 hw_free)
{
	int code = 0;

	if (hw_free & CR_STATUS_SMASK)
		code |= PRC_STATUS_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
		code |= PRC_PBC;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
		code |= PRC_THRESHOLD;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
		code |= PRC_FILL_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
		code |= PRC_SC_DISABLE;
	return code;
}

/* use the jiffies compare to get the wrap right */
#define sent_before(a, b) time_before(a, b)	/* a < b */

/*
 * The send context buffer "releaser".
 */
void sc_release_update(struct send_context *sc)
{
	struct pio_buf *pbuf;
	u64 hw_free;
	u32 head, tail;
	unsigned long old_free;
	unsigned long free;
	unsigned long extra;
	unsigned long flags;
	int code;

	if (!sc)
		return;

	spin_lock_irqsave(&sc->release_lock, flags);
	/* update free */
	hw_free = le64_to_cpu(*sc->hw_free);	/* volatile read */
	old_free = sc->free;
	extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
			- (old_free & CR_COUNTER_MASK))
				& CR_COUNTER_MASK;
	free = old_free + extra;
	trace_hfi1_piofree(sc, extra);

	/* call sent buffer callbacks */
	code = -1;	/* code not yet set */
	head = READ_ONCE(sc->sr_head);	/* snapshot the head */
	tail = sc->sr_tail;
	while (head != tail) {
		pbuf = &sc->sr[tail].pbuf;

		if (sent_before(free, pbuf->sent_at)) {
			/* not sent yet */
			break;
		}
		if (pbuf->cb) {
			if (code < 0) /* fill in code on first user */
				code = fill_code(hw_free);
			(*pbuf->cb)(pbuf->arg, code);
		}

		tail++;
		if (tail >= sc->sr_size)
			tail = 0;
	}
	sc->sr_tail = tail;
	/* make sure tail is updated before free */
	smp_wmb();
	sc->free = free;
	spin_unlock_irqrestore(&sc->release_lock, flags);
	sc_piobufavail(sc);
}
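/*
 * The CR_COUNTER arithmetic in sc_release_update() is wrap-safe.
 * For example, with a hypothetical 12-bit counter (mask 0xfff), an
 * old_free ending in 0xffe and a hardware count of 0x003 yield
 * extra = (0x003 - 0xffe) & 0xfff = 5.
 */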
/*
 * Send context group releaser.  Argument is the send context that caused
 * the interrupt.  Called from the send context interrupt handler.
 *
 * Call release on all contexts in the group.
 *
 * This routine takes the sc_lock without an irqsave because it is only
 * called from an interrupt handler.  Adjust if that changes.
 */
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
{
	struct send_context *sc;
	u32 sw_index;
	u32 gc, gc_end;

	spin_lock(&dd->sc_lock);
	sw_index = dd->hw_to_sw[hw_context];
	if (unlikely(sw_index >= dd->num_send_contexts)) {
		dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
			   __func__, hw_context, sw_index);
		goto done;
	}
	sc = dd->send_contexts[sw_index].sc;
	if (unlikely(!sc))
		goto done;

	gc = group_context(hw_context, sc->group);
	gc_end = gc + group_size(sc->group);
	for (; gc < gc_end; gc++) {
		sw_index = dd->hw_to_sw[gc];
		if (unlikely(sw_index >= dd->num_send_contexts)) {
			dd_dev_err(dd,
				   "%s: invalid hw (%u) to sw (%u) mapping\n",
				   __func__, hw_context, sw_index);
			continue;
		}
		sc_release_update(dd->send_contexts[sw_index].sc);
	}
done:
	spin_unlock(&dd->sc_lock);
}

/*
 * pio_select_send_context_vl() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns a send context based on the selector and a vl.
 * The mapping fields are protected by RCU.
 */
struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
						u32 selector, u8 vl)
{
	struct pio_vl_map *m;
	struct pio_map_elem *e;
	struct send_context *rval;

	/*
	 * NOTE This should only happen if SC->VL changed after the initial
	 * checks on the QP/AH
	 * Default will return VL0's send context below
	 */
	if (unlikely(vl >= num_vls)) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->pio_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return dd->vld[0].sc;
	}
	e = m->map[vl & m->mask];
	rval = e->ksc[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? dd->vld[0].sc : rval;
	return rval;
}
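/*
 * Example: if VL0 owns four kernel contexts (so e->mask == 3,
 * hypothetically), selectors 0..3 pick ksc[0..3] directly and
 * selector 13 wraps to ksc[13 & 3] = ksc[1].
 */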
/*
 * pio_select_send_context_sc() - select send context
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 * This function returns a send context based on the selector and an sc.
 */
struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
						u32 selector, u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return pio_select_send_context_vl(dd, selector, vl);
}

/*
 * Free the indicated map struct
 */
static void pio_map_free(struct pio_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void pio_map_rcu_callback(struct rcu_head *list)
{
	struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);

	pio_map_free(m);
}

/*
 * Set credit return threshold for the kernel send context
 */
static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
{
	u32 thres;

	thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
					    50),
		    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
					dd->vld[i].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
}

/*
 * pio_map_init - called when #vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_scontexts: per vl send context mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_scontexts is used to specify a non-uniform vl/send context
 * loading.  NULL implies auto computing the loading and giving each
 * VL a uniform distribution of send contexts per VL.
 *
 * The auto algorithm computes the sc_per_vl and the number of extra
 * send contexts.  Any extra send contexts are added from the last VL
 * on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_send_contexts are non-power of 2, the
 * array sizes in the struct pio_vl_map and the struct pio_map_elem are
 * rounded up to the next highest power of 2 and the first entry is
 * reused in a round robin fashion.
 *
 * If an error occurs, the map change is not done and the mapping is not
 * changed.
 */
int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
{
	int i, j;
	int extra, sc_per_vl;
	int scontext = 1;
	int num_kernel_send_contexts = 0;
	u8 lvl_scontexts[OPA_MAX_VLS];
	struct pio_vl_map *oldmap, *newmap;

	if (!vl_scontexts) {
		for (i = 0; i < dd->num_send_contexts; i++)
			if (dd->send_contexts[i].type == SC_KERNEL)
				num_kernel_send_contexts++;
		/* truncate divide */
		sc_per_vl = num_kernel_send_contexts / num_vls;
		/* extras */
		extra = num_kernel_send_contexts % num_vls;
		vl_scontexts = lvl_scontexts;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
	}
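	/*
	 * Example of the auto distribution (hypothetical): 18 kernel
	 * send contexts across num_vls = 8 give sc_per_vl = 2 and
	 * extra = 2, so VLs 7 and 6 are assigned 3 contexts each and
	 * VLs 0..5 get 2.
	 */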
	/* build new map */
	newmap = kzalloc(sizeof(*newmap) +
			 roundup_pow_of_two(num_vls) *
			 sizeof(struct pio_map_elem *),
			 GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_scontext = scontext;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_scontexts[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
						 sz * sizeof(struct
							     send_context *),
						 GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/*
			 * assign send contexts and
			 * adjust credit return threshold
			 */
			for (j = 0; j < sz; j++) {
				if (dd->kernel_send_context[scontext]) {
					newmap->map[i]->ksc[j] =
					dd->kernel_send_context[scontext];
					set_threshold(dd, scontext, i);
				}
				if (++scontext >= first_scontext +
						  vl_scontexts[i])
					/* wrap back to first send context */
					scontext = first_scontext;
			}
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		scontext = first_scontext + vl_scontexts[i];
	}
	/* newmap in hand, save old map */
	spin_lock_irq(&dd->pio_map_lock);
	oldmap = rcu_dereference_protected(dd->pio_map,
					   lockdep_is_held(&dd->pio_map_lock));

	/* publish newmap */
	rcu_assign_pointer(dd->pio_map, newmap);

	spin_unlock_irq(&dd->pio_map_lock);
	/* success, free any old map after grace period */
	if (oldmap)
		call_rcu(&oldmap->list, pio_map_rcu_callback);
	return 0;
bail:
	/* free any partial allocation */
	pio_map_free(newmap);
	return -ENOMEM;
}

void free_pio_map(struct hfi1_devdata *dd)
{
	/* Free PIO map if allocated */
	if (rcu_access_pointer(dd->pio_map)) {
		spin_lock_irq(&dd->pio_map_lock);
		pio_map_free(rcu_access_pointer(dd->pio_map));
		RCU_INIT_POINTER(dd->pio_map, NULL);
		spin_unlock_irq(&dd->pio_map_lock);
		synchronize_rcu();
	}
	kfree(dd->kernel_send_context);
	dd->kernel_send_context = NULL;
}

int init_pervl_scs(struct hfi1_devdata *dd)
{
	int i;
	u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
	u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
	u32 ctxt;
	struct hfi1_pportdata *ppd = dd->pport;

	dd->vld[15].sc = sc_alloc(dd, SC_VL15,
				  dd->rcd[0]->rcvhdrqentsize, dd->node);
	if (!dd->vld[15].sc)
		return -ENOMEM;

	hfi1_init_ctxt(dd->vld[15].sc);
	dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);

	dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
					       sizeof(struct send_context *),
					       GFP_KERNEL, dd->node);
	if (!dd->kernel_send_context)
		goto freesc15;

	dd->kernel_send_context[0] = dd->vld[15].sc;

	for (i = 0; i < num_vls; i++) {
		/*
		 * Since this function does not deal with a specific
		 * receive context but we need the RcvHdrQ entry size,
		 * use the size from rcd[0].  It is guaranteed to be
		 * valid at this point and will remain the same for all
		 * receive contexts.
		 */
		dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
					 dd->rcd[0]->rcvhdrqentsize, dd->node);
		if (!dd->vld[i].sc)
			goto nomem;
		dd->kernel_send_context[i + 1] = dd->vld[i].sc;
		hfi1_init_ctxt(dd->vld[i].sc);
		/* non VL15 start with the max MTU */
		dd->vld[i].mtu = hfi1_max_mtu;
	}
	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
		dd->kernel_send_context[i + 1] =
		sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
		if (!dd->kernel_send_context[i + 1])
			goto nomem;
		hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
	}

	sc_enable(dd->vld[15].sc);
	ctxt = dd->vld[15].sc->hw_context;
	mask = all_vl_mask & ~(1LL << 15);
	write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	dd_dev_info(dd,
		    "Using send context %u(%u) for VL15\n",
		    dd->vld[15].sc->sw_index, ctxt);

	for (i = 0; i < num_vls; i++) {
		sc_enable(dd->vld[i].sc);
		ctxt = dd->vld[i].sc->hw_context;
		mask = all_vl_mask & ~(data_vls_mask);
		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	}
	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
		sc_enable(dd->kernel_send_context[i + 1]);
		ctxt = dd->kernel_send_context[i + 1]->hw_context;
		mask = all_vl_mask & ~(data_vls_mask);
		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	}

	if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
		goto nomem;
	return 0;

nomem:
	for (i = 0; i < num_vls; i++) {
		sc_free(dd->vld[i].sc);
		dd->vld[i].sc = NULL;
	}

	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
		sc_free(dd->kernel_send_context[i + 1]);

	kfree(dd->kernel_send_context);
	dd->kernel_send_context = NULL;

freesc15:
	sc_free(dd->vld[15].sc);
	return -ENOMEM;
}

int init_credit_return(struct hfi1_devdata *dd)
{
	int ret;
	int i;

	dd->cr_base = kcalloc(
		node_affinity.num_possible_nodes,
		sizeof(struct credit_return_base),
		GFP_KERNEL);
	if (!dd->cr_base) {
		ret = -ENOMEM;
		goto done;
	}
	for_each_node_with_cpus(i) {
		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);

		set_dev_node(&dd->pcidev->dev, i);
		dd->cr_base[i].va = dma_zalloc_coherent(
					&dd->pcidev->dev,
					bytes,
					&dd->cr_base[i].dma,
					GFP_KERNEL);
		if (!dd->cr_base[i].va) {
			set_dev_node(&dd->pcidev->dev, dd->node);
			dd_dev_err(dd,
				   "Unable to allocate credit return DMA range for NUMA %d\n",
				   i);
			ret = -ENOMEM;
			goto done;
		}
	}
	set_dev_node(&dd->pcidev->dev, dd->node);

	ret = 0;
done:
	return ret;
}

void free_credit_return(struct hfi1_devdata *dd)
{
	int i;

	if (!dd->cr_base)
		return;
	for (i = 0; i < node_affinity.num_possible_nodes; i++) {
		if (dd->cr_base[i].va) {
			dma_free_coherent(&dd->pcidev->dev,
					  TXE_NUM_CONTEXTS *
					  sizeof(struct credit_return),
					  dd->cr_base[i].va,
					  dd->cr_base[i].dma);
		}
	}
	kfree(dd->cr_base);
	dd->cr_base = NULL;
}