/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return : reflect kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	return kref_get_unless_zero(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
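 *
 * Return: 0 on success, with *index set to the allocated slot and the
 * rcd's kref initialized; -EBUSY when every entry in dd->rcd[] is
 * already in use.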
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		if (!hfi1_rcd_get(rcd))
			rcd = NULL;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

		mutex_init(&rcd->exp_mutex);
		spin_lock_init(&rcd->exp_lock);
		INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
		INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
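		 *
		 * Illustrative sketch (the numbers are assumptions, not
		 * chip constants): with rcv_entries.ngroups == 4 and
		 * kctxt_ngroups == 2, kernel contexts 0 and 1 each receive
		 * one extra group, so context 0 starts at group 0, context 1
		 * at group 5 (0 + 4 + 1) and context 2 at group 10
		 * (kctxt_ngroups + 2 * ngroups); the RcvArray index is then
		 * base * rcv_entries.group_size.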
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for
		 * that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;

			/* Initialize TID flow generations for the context */
			hfi1_kern_init_ctxt_generations(rcd);
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
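 *
 * A hedged sketch of the intended pairing (not a literal call sequence
 * from this file), following the usual kref idiom:
 *
 *	hfi1_create_ctxtdata(ppd, node, &rcd);	// kref starts at one
 *	rcd2 = hfi1_rcd_get_by_index(dd, i);	// temporary reference
 *	hfi1_rcd_put(rcd2);			// drop temporary reference
 *	hfi1_free_ctxt(rcd);			// final put, may free rcd
 *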
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	else if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
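		/*
		 * Worked example for the interval above (an assumed value,
		 * not a driver default): ccti_timer == 977 gives
		 * nsec = 1024 * 977 = 1000448, so the timer is re-armed
		 * roughly every 1 ms.
		 */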
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
			   !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		if (HFI1_CAP_IS_KSET(TID_RDMA))
			rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
				    "hfi_link_%d_%d",
				    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
				    1, /* max_active */
				    dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
							 sizeof(u64),
							 &dd->rcvhdrtail_dummy_dma,
							 GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (!lastfail)
			lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		/* enable IRQ */
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
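	/*
	 * Sizing sketch (illustrative, not values asserted in this file):
	 * one entry per possible (context, sub-context) pair, i.e.
	 * chip_rcv_contexts() * HFI1_MAX_SHARED_CTXTS entries, rounded up
	 * to a whole page so the vmalloc_user() area can be mapped into
	 * user processes.
	 */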
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		enable_general_intr(dd);
		init_qsfp_int(dd);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
	msix_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

/**
 * hfi1_clean_devdata - cleans up per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up all data structures set up by hfi1_alloc_devdata().
 */
static void hfi1_clean_devdata(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	if (!list_empty(&dd->list)) {
		idr_remove(&hfi1_unit_table, dd->unit);
		list_del_init(&dd->list);
	}
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	dd->int_counter = NULL;
	dd->rcv_limit = NULL;
	dd->send_schedule = NULL;
	dd->tx_opstats = NULL;
	kfree(dd->comp_vect);
	dd->comp_vect = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);

	hfi1_clean_devdata(dd);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}

/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid PCI device
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
					       size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is sizeof(struct hfi1_pportdata) * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}
	dd->node = NUMA_NO_NODE;

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);
	spin_lock_init(&dd->irq_src_lock);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
	if (!dd->comp_vect) {
		ret = -ENOMEM;
		goto bail;
	}

	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	hfi1_clean_devdata(dd);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count, time must wait until after
	 * the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	ret = opfn_init();
	if (ret < 0) {
		pr_err("Failed to allocate opfn_wq");
		goto bail_dev;
	}

	hfi1_compute_tid_rdma_flow_wt();
	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	opfn_exit();
	node_affinity_destroy_all();
	hfi1_dbg_exit();

	idr_destroy(&hfi1_unit_table);
	dispose_firmware();	/* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we iterate over the full context count, because that
	 * is what we allocated.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_free_ctxt_rcv_groups(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);
	hfi1_comp_vectors_clean_up(dd);
	hfi1_dev_affinity_clean_up(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd,
			   "Receive header queue count cannot be greater than %u\n",
			   HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
			   thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Allocate the dd so we can get to work */
	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
			   hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		dd_dev_info(dd, "Eager buffer size %u\n",
			    eager_buffer_size);
	} else {
		dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(dd);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	ret = hfi1_init_dd(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		msix_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there
	 * are no clients or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	shutdown_device(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
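 *
 * Rough sizing sketch, assuming rcvhdrq_size() works out to roughly
 * rcvhdrq_cnt * rcvhdrqentsize DWORDs: the module defaults of
 * rcvhdrcnt = 2048 and hdrq_entsize = 32 would give 2048 * 32 * 4 bytes,
 * i.e. a 256 KB coherent allocation per context.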
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		gfp_t gfp_flags;

		amt = rcvhdrq_size(rcd);

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
						  &rcd->rcvhdrq_dma,
						  gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
								    PAGE_SIZE,
								    &rcd->rcvhdrqtailaddr_dma,
								    gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
		}
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 * contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into hw.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0;
	gfp_t gfp_flags;
	u16 order, idx = 0;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
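	 *
	 * Example with assumed values (not taken from this file): an
	 * hfi1_max_mtu of 10240 rounds up to round_mtu = 16384, so with a
	 * rcv_entries.group_size of 8 the floor enforced below is
	 * 8 * 16 KB = 128 KB.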
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_alloc_coherent(&dd->pcidev->dev,
					   rcd->egrbufs.rcvtid_size,
					   &rcd->egrbufs.buffers[idx].dma,
					   gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this implies
			 *     that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
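	/*
	 * e.g. for the computation above (an assumed count): 848 allocated
	 * entries give alloced / 2 = 424 and a threshold of
	 * rounddown_pow_of_two(424) = 256, which can then be applied with
	 * a mask rather than a modulo.
	 */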
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}