/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 */
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	kref_get(&rcd->kref);
}
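
/*
 * Note on usage (illustrative, mirroring how callers later in this file use
 * these helpers): a lookup takes a reference that the caller must drop, e.g.
 *
 *	rcd = hfi1_rcd_get_by_index(dd, i);
 *	if (rcd) {
 *		... use rcd ...
 *		hfi1_rcd_put(rcd);
 *	}
 *
 * The "final" put (via hfi1_free_ctxt()) releases the initial reference
 * taken in hfi1_rcd_init() and frees the context.
 */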

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		hfi1_rcd_get(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(&rcd->tid_group_list);
		hfi1_exp_tid_group_init(&rcd->tid_used_list);
		hfi1_exp_tid_group_init(&rcd->tid_full_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		__set_bit(0, rcd->in_use_ctxts);
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
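		/*
		 * Illustrative example (numbers assumed, not hardware
		 * values): with ngroups = 8 per context and kctxt_ngroups = 2
		 * extra groups spread over the first kernel contexts, kernel
		 * context 0 starts at group 0 with 9 groups, context 1 at
		 * group 9 with 9 groups, and context 2 at group 18 with the
		 * plain 8 groups; dynamic contexts continue after all kernel
		 * groups in the same fashion using nctxt_extra.
		 */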
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for that
		 * context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kzalloc_node(
			rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
			GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kzalloc_node(
				rcd->egrbufs.count *
				sizeof(*rcd->egrbufs.rcvtids),
				GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
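
/*
 * Illustrative note on the CCT entry format used above (example value
 * assumed): the top two bits of the entry are the shift and the low 14 bits
 * are the multiplier, so an entry of 0xc00a gives shift = 3 and mult = 10,
 * making the programmed reload value (max_pkt_time >> 3) * 10.
 */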

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
					"hfi%d_%d",
					WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
					dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
					"hfi_link_%d_%d",
					WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
					1, /* max_active */
					dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
		kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
		kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
		process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
		process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
		process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	if (ad)
		finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}
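
/*
 * Note (descriptive only): hfi1_free_devdata() simply drops the kobject
 * reference initialized in hfi1_alloc_devdata(); the real teardown runs in
 * __hfi1_free_devdata() above once the last reference is gone.
 */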

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* "extra" is a whole number of per-port structures */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu rcv_limit\n");
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu send_schedule\n");
		goto bail;
	}

	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
				&pdev->dev,
				"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count here; the timeout must wait
	 * until after the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();
	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we iterate over num_rcv_contexts, because that is
	 * what we allocate.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
			       thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	dd = hfi1_init_dd(pdev, ent);

	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto clean_bail; /* error already printed */
	}

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.
	 * If earlier failure, we still create devices, so diags, etc.
	 * can be used to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there
	 * are no clients, or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t dma_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
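		/*
		 * Sizing example with the default module parameters (values
		 * illustrative only): rcvhdrcnt = 2048 entries at
		 * hdrq_entsize = 32 DWs is 2048 * 32 * 4 = 256 KB, which
		 * PAGE_ALIGN() leaves unchanged on a 4 KB page system.
		 */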
		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
				 sizeof(u32));

		if ((rcd->ctxt < dd->first_dyn_alloc_ctxt) ||
		    (rcd->sc && (rcd->sc->type == SC_KERNEL)))
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].dma,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *	- we are already using the lowest acceptable size
			 *	- we are using one-pkt-per-egr-buffer (this implies
			 *	  that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}