/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 */
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	kref_get(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		hfi1_rcd_get(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(&rcd->tid_group_list);
		hfi1_exp_tid_group_init(&rcd->tid_used_list);
		hfi1_exp_tid_group_init(&rcd->tid_full_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		__set_bit(0, rcd->in_use_ctxts);
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
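		 *
		 * For example (purely illustrative numbers, not chip
		 * defaults): with ngroups = 2, group_size = 8 and
		 * kctxt_ngroups = 0, kernel context 1 would get
		 * base = 0 + 1 * 2 = 2, i.e. eager_base = 2 * 8 = 16.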
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups. Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	else if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:

	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
					"hfi%d_%d",
					WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
					dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
					"hfi_link_%d_%d",
					WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
					1, /* max_active */
					dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
		kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
		kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
		process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
		process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
		process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts, but not errors */
	set_intr_state(dd, 0);
	hfi1_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

/**
 * hfi1_clean_devdata - cleans up per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up all data structures set up by
 * hfi1_alloc_devdata().
 */
static void hfi1_clean_devdata(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	if (!list_empty(&dd->list)) {
		idr_remove(&hfi1_unit_table, dd->unit);
		list_del_init(&dd->list);
	}
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	dd->int_counter = NULL;
	dd->rcv_limit = NULL;
	dd->send_schedule = NULL;
	dd->tx_opstats = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);

	hfi1_clean_devdata(dd);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is sizeof(struct hfi1_pportdata) * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	hfi1_clean_devdata(dd);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count, time must wait until after
	 * the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
			       thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	dd = hfi1_init_dd(pdev, ent);

	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto clean_bail; /* error already printed */
	}

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		hfi1_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there are
	 * no clients or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t dma_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
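		 * For example, with the default module parameters above
		 * (rcvhdrcnt = 2048, hdrq_entsize = 32 DWs) the queue is
		 * 2048 * 32 * 4 = 262144 bytes, which PAGE_ALIGN() leaves
		 * unchanged at 256KB on a 4KB-page system.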
		 */
		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
				 sizeof(u32));

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
		& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
		& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].dma,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this implies
			 *     that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
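	 * For example (illustrative numbers only): with group_size = 8,
	 * rcv_array_groups = 128 and 1000 eager entries actually allocated,
	 * egrtop = roundup(1000, 8) = 1000, so expected_count becomes
	 * 128 * 8 - 1000 = 24 and expected_base = eager_base + 1000.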
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}