/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/xarray.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * Minimum number of buffers we want to have per user context, after the
 * driver's own needs are met.
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for
 * more pio buffers per ctxt, etc.).  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
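/*
 * Example (illustrative only, not driver output): the parameters above
 * are set at module load time, e.g.:
 *
 *   modprobe hfi1 krcvqs=2,2,2 eager_buffer_size=2097152 rcvarr_split=25
 *
 * krcvqs is indexed by VL, so this requests two non-control kernel
 * receive queues on each of VL0-VL2; compute_krcvqs() below would then
 * set n_krcvqs = 6.
 */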
DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* The control context must always be context 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* The control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->fast_handler = get_dma_rtail_setting(rcd) ?
				handle_receive_interrupt_dma_rtail :
				handle_receive_interrupt_nodma_rtail;
	rcd->slow_handler = handle_receive_interrupt;

	hfi1_set_seq_cnt(rcd, 1);

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return: reflects kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	return kref_get_unless_zero(&rcd->kref);
}
/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, return -EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index - look up an rcd and take a reference on it
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more indices, take the protecting spinlock and then increment
 * the kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		if (!hfi1_rcd_get(rcd))
			rcd = NULL;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}
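/*
 * Illustrative sketch (not called anywhere in the driver; the function
 * name is hypothetical): the expected way to borrow an rcd through the
 * reference counting API above.
 */
static inline void __maybe_unused example_rcd_borrow(struct hfi1_devdata *dd,
						     u16 ctxt)
{
	struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);

	if (!rcd)
		return;	/* no rcd at this index, or it is being torn down */
	/* ... use rcd here; the held kref keeps it from being freed ... */
	hfi1_rcd_put(rcd);
}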
/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
		rcd->msix_intr = CCE_NUM_MSIX_VECTORS;

		mutex_init(&rcd->exp_mutex);
		spin_lock_init(&rcd->exp_lock);
		INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
		INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken, and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for
		 * that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);
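		/*
		 * Worked example (hypothetical numbers): with
		 * group_size = 8 and rcv_array_groups = 104, this context
		 * owns 832 RcvArray entries.  The default rcvarr_split of
		 * 25 leaves 832 * 25 / 100 = 208 entries for eager buffers
		 * (already a multiple of the group size, so no rounding),
		 * and the remaining 624 entries become expected (TID)
		 * entries.
		 */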
		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %u\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;

			/* Initialize TID flow generations for the context */
			hfi1_kern_init_ctxt_generations(rcd);
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt - free context data when done with it
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}
/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}
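/*
 * Worked example (hypothetical values) for the static rate control
 * programming in set_link_ipg() above: a CCT entry of 0x4064 decodes
 * as shift = (0x4064 & 0xc000) >> 14 = 1 and mult = 0x4064 & 0x3fff =
 * 100, so a maximum packet egress time of 1000 fabric-clock cycles
 * yields an SRC reload value of (1000 >> 1) * 100 = 50000.  Likewise,
 * cca_timer_fn() converts ccti_timer, which is in units of 1.024 usec,
 * to nanoseconds as 1024 * ccti_timer.
 */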
/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
			   !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity-check at least some of the values after reset, and ensure no
 * receive or transmit activity will occur (explicitly, in case the
 * reset failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}
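/*
 * Note: enable_chip() below is the counterpart of the quiescing done in
 * init_after_reset() above; the *_DIS receive controls and the global
 * PIO disable are undone with the matching *_ENB operations once the
 * driver data structures are consistent again.
 */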
static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts are enabled as users open and initialize them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		if (HFI1_CAP_IS_KSET(TID_RDMA))
			rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per-port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
					"hfi%d_%d",
					WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
					WQ_MEM_RECLAIM,
					HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
					dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
					"hfi_link_%d_%d",
					WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
					1, /* max_active */
					dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}
/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers,
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = true;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = false;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
							 sizeof(u64),
							 &dd->rcvhdrtail_dummy_dma,
							 GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (!lastfail)
			lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		/* enable IRQ */
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		enable_general_intr(dd);
		init_qsfp_int(dd);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	return xa_load(&hfi1_dev_table, unit);
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
	msix_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Long enough for anything that's going to trickle out to have
	 * actually done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * Free up any allocated data for a context.
 * This should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (hfi1_rcvhdrtail_kvaddr(rcd)) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)hfi1_rcvhdrtail_kvaddr(rcd),
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_dev_table lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

/**
 * hfi1_free_devdata - cleans up and frees per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up and frees all data structures set up by
 * hfi1_alloc_devdata().
 */
void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	unsigned long flags;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	__xa_erase(&hfi1_dev_table, dd->unit);
	ad = release_asic_data(dd);
	xa_unlock_irqrestore(&hfi1_dev_table, flags);

	finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	dd->int_counter = NULL;
	dd->rcv_limit = NULL;
	dd->send_schedule = NULL;
	dd->tx_opstats = NULL;
	kfree(dd->comp_vect);
	dd->comp_vect = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid PCI device
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
					       size_t extra)
{
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is sizeof(struct hfi1_pportdata) * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);
	dd->node = NUMA_NO_NODE;

	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
			   GFP_KERNEL);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device.  This needs to be done as
	 * early as possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);
	spin_lock_init(&dd->irq_src_lock);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
	if (!dd->comp_vect) {
		ret = -ENOMEM;
		goto bail;
	}

	atomic_set(&dd->ipoib_rsm_usr_num, 0);
	return dd;

bail:
	hfi1_free_devdata(dd);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize the receive interrupt count; the timeout sanitization
	 * must wait until after the hardware type is known.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	ret = opfn_init();
	if (ret < 0) {
		pr_err("Failed to allocate opfn_wq\n");
		goto bail_dev;
	}

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	opfn_exit();
	node_affinity_destroy_all();
	hfi1_dbg_exit();

	WARN_ON(!xa_empty(&hfi1_dev_table));
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we iterate over num_rcv_contexts because that is
	 * what we allocate.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_free_ctxt_rcv_groups(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}
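/*
 * Illustrative read-side counterpart (hypothetical helper, not part of
 * the driver) to the cc_state teardown in cleanup_device_data() above:
 * readers such as cca_timer_fn() access ppd->cc_state under
 * rcu_read_lock(), which is why the teardown publishes NULL with
 * RCU_INIT_POINTER() and defers the free with kfree_rcu().
 */
static inline u16 __maybe_unused example_read_ccti_limit(struct hfi1_pportdata *ppd)
{
	struct cc_state *cc_state;
	u16 limit = 0;

	rcu_read_lock();
	cc_state = get_cc_state(ppd);
	if (cc_state)
		limit = cc_state->cct.ccti_limit;
	rcu_read_unlock();

	return limit;
}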
/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);
	hfi1_comp_vectors_clean_up(dd);
	hfi1_dev_affinity_clean_up(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Allocate the dd so we can get to work */
	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto bail;
	}

	/* Validate some global module parameters */
	ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
			   hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range,
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		dd_dev_info(dd, "Eager buffer size %u\n",
			    eager_buffer_size);
	} else {
		dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(dd);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	ret = hfi1_init_dd(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		msix_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Drop the device's initial user_refcount and complete the device
	 * if there are no clients, or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* free netdev data */
	hfi1_netdev_free(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	shutdown_device(dd);
}
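/*
 * Sizing note (derived from the module parameter defaults at the top of
 * this file, and assuming rcvhdrq_size() is entry count times entry
 * size): with rcvhdrcnt = 2048 and hdrq_entsize = 32 DWs (128 bytes),
 * hfi1_create_rcvhdrq() below allocates 2048 * 128 B = 256 KB of
 * DMA-coherent memory per context.
 */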
/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;

	if (!rcd->rcvhdrq) {
		gfp_t gfp_flags;

		amt = rcvhdrq_size(rcd);

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
						  &rcd->rcvhdrq_dma,
						  gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
								    PAGE_SIZE,
								    &rcd->rcvhdrqtailaddr_dma,
								    gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
		}
	}

	set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
		      rcd->rcvhdrq_cnt);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 * contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we would get the OOM code involved by asking for
 * too much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0;
	gfp_t gfp_flags;
	u16 order, idx = 0;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against
	 * the MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_alloc_coherent(&dd->pcidev->dev,
					   rcd->egrbufs.rcvtid_size,
					   &rcd->egrbufs.buffers[idx].dma,
					   gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 * - we are already using the lowest acceptable size
			 * - we are using one-pkt-per-egr-buffer (this implies
			 *   that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed,
			 * don't fail everything but continue with the next
			 * lower size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}