1 /* 2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include <linux/pci.h> 36 #include <linux/netdevice.h> 37 #include <linux/vmalloc.h> 38 #include <linux/delay.h> 39 #include <linux/module.h> 40 #include <linux/printk.h> 41 #ifdef CONFIG_INFINIBAND_QIB_DCA 42 #include <linux/dca.h> 43 #endif 44 #include <rdma/rdma_vt.h> 45 46 #include "qib.h" 47 #include "qib_common.h" 48 #include "qib_mad.h" 49 #ifdef CONFIG_DEBUG_FS 50 #include "qib_debugfs.h" 51 #include "qib_verbs.h" 52 #endif 53 54 #undef pr_fmt 55 #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt 56 57 /* 58 * min buffers we want to have per context, after driver 59 */ 60 #define QIB_MIN_USER_CTXT_BUFCNT 7 61 62 #define QLOGIC_IB_R_SOFTWARE_MASK 0xFF 63 #define QLOGIC_IB_R_SOFTWARE_SHIFT 24 64 #define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62) 65 66 /* 67 * Number of ctxts we are configured to use (to allow for more pio 68 * buffers per ctxt, etc.) Zero means use chip value. 69 */ 70 ushort qib_cfgctxts; 71 module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO); 72 MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use"); 73 74 unsigned qib_numa_aware; 75 module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO); 76 MODULE_PARM_DESC(numa_aware, 77 "0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process"); 78 79 /* 80 * If set, do not write to any regs if avoidable, hack to allow 81 * check for deranged default register values. 
82 */ 83 ushort qib_mini_init; 84 module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO); 85 MODULE_PARM_DESC(mini_init, "If set, do minimal diag init"); 86 87 unsigned qib_n_krcv_queues; 88 module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); 89 MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); 90 91 unsigned qib_cc_table_size; 92 module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); 93 MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); 94 95 static void verify_interrupt(struct timer_list *); 96 97 DEFINE_XARRAY_FLAGS(qib_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); 98 u32 qib_cpulist_count; 99 unsigned long *qib_cpulist; 100 101 /* set number of contexts we'll actually use */ 102 void qib_set_ctxtcnt(struct qib_devdata *dd) 103 { 104 if (!qib_cfgctxts) { 105 dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); 106 if (dd->cfgctxts > dd->ctxtcnt) 107 dd->cfgctxts = dd->ctxtcnt; 108 } else if (qib_cfgctxts < dd->num_pports) 109 dd->cfgctxts = dd->ctxtcnt; 110 else if (qib_cfgctxts <= dd->ctxtcnt) 111 dd->cfgctxts = qib_cfgctxts; 112 else 113 dd->cfgctxts = dd->ctxtcnt; 114 dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 : 115 dd->cfgctxts - dd->first_user_ctxt; 116 } 117 118 /* 119 * Common code for creating the receive context array. 120 */ 121 int qib_create_ctxts(struct qib_devdata *dd) 122 { 123 unsigned i; 124 int local_node_id = pcibus_to_node(dd->pcidev->bus); 125 126 if (local_node_id < 0) 127 local_node_id = numa_node_id(); 128 dd->assigned_node_id = local_node_id; 129 130 /* 131 * Allocate full ctxtcnt array, rather than just cfgctxts, because 132 * cleanup iterates across all possible ctxts. 133 */ 134 dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL); 135 if (!dd->rcd) 136 return -ENOMEM; 137 138 /* create (one or more) kctxt */ 139 for (i = 0; i < dd->first_user_ctxt; ++i) { 140 struct qib_pportdata *ppd; 141 struct qib_ctxtdata *rcd; 142 143 if (dd->skip_kctxt_mask & (1 << i)) 144 continue; 145 146 ppd = dd->pport + (i % dd->num_pports); 147 148 rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id); 149 if (!rcd) { 150 qib_dev_err(dd, 151 "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); 152 kfree(dd->rcd); 153 dd->rcd = NULL; 154 return -ENOMEM; 155 } 156 rcd->pkeys[0] = QIB_DEFAULT_P_KEY; 157 rcd->seq_cnt = 1; 158 } 159 return 0; 160 } 161 162 /* 163 * Common code for user and kernel context setup. 164 */ 165 struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, 166 int node_id) 167 { 168 struct qib_devdata *dd = ppd->dd; 169 struct qib_ctxtdata *rcd; 170 171 rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id); 172 if (rcd) { 173 INIT_LIST_HEAD(&rcd->qp_wait_list); 174 rcd->node_id = node_id; 175 rcd->ppd = ppd; 176 rcd->dd = dd; 177 rcd->cnt = 1; 178 rcd->ctxt = ctxt; 179 dd->rcd[ctxt] = rcd; 180 #ifdef CONFIG_DEBUG_FS 181 if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ 182 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), 183 GFP_KERNEL, node_id); 184 if (!rcd->opstats) { 185 kfree(rcd); 186 qib_dev_err(dd, 187 "Unable to allocate per ctxt stats buffer\n"); 188 return NULL; 189 } 190 } 191 #endif 192 dd->f_init_ctxt(rcd); 193 194 /* 195 * To avoid wasting a lot of memory, we allocate 32KB chunks 196 * of physically contiguous memory, advance through it until 197 * used up and then allocate more. Of course, we need 198 * memory to store those extra pointers, now. 
32KB seems to 199 * be the most that is "safe" under memory pressure 200 * (creating large files and then copying them over 201 * NFS while doing lots of MPI jobs). The OOM killer can 202 * get invoked, even though we say we can sleep and this can 203 * cause significant system problems.... 204 */ 205 rcd->rcvegrbuf_size = 0x8000; 206 rcd->rcvegrbufs_perchunk = 207 rcd->rcvegrbuf_size / dd->rcvegrbufsize; 208 rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + 209 rcd->rcvegrbufs_perchunk - 1) / 210 rcd->rcvegrbufs_perchunk; 211 rcd->rcvegrbufs_perchunk_shift = 212 ilog2(rcd->rcvegrbufs_perchunk); 213 } 214 return rcd; 215 } 216 217 /* 218 * Common code for initializing the physical port structure. 219 */ 220 int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, 221 u8 hw_pidx, u8 port) 222 { 223 int size; 224 225 ppd->dd = dd; 226 ppd->hw_pidx = hw_pidx; 227 ppd->port = port; /* IB port number, not index */ 228 229 spin_lock_init(&ppd->sdma_lock); 230 spin_lock_init(&ppd->lflags_lock); 231 spin_lock_init(&ppd->cc_shadow_lock); 232 init_waitqueue_head(&ppd->state_wait); 233 234 timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0); 235 236 ppd->qib_wq = NULL; 237 ppd->ibport_data.pmastats = 238 alloc_percpu(struct qib_pma_counters); 239 if (!ppd->ibport_data.pmastats) 240 return -ENOMEM; 241 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); 242 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); 243 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); 244 if (!(ppd->ibport_data.rvp.rc_acks) || 245 !(ppd->ibport_data.rvp.rc_qacks) || 246 !(ppd->ibport_data.rvp.rc_delayed_comp)) 247 return -ENOMEM; 248 249 if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) 250 goto bail; 251 252 ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size, 253 IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT); 254 255 ppd->cc_max_table_entries = 256 ppd->cc_supported_table_entries/IB_CCT_ENTRIES; 257 258 size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry) 259 * IB_CCT_ENTRIES; 260 ppd->ccti_entries = kzalloc(size, GFP_KERNEL); 261 if (!ppd->ccti_entries) 262 goto bail; 263 264 size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry); 265 ppd->congestion_entries = kzalloc(size, GFP_KERNEL); 266 if (!ppd->congestion_entries) 267 goto bail_1; 268 269 size = sizeof(struct cc_table_shadow); 270 ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL); 271 if (!ppd->ccti_entries_shadow) 272 goto bail_2; 273 274 size = sizeof(struct ib_cc_congestion_setting_attr); 275 ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL); 276 if (!ppd->congestion_entries_shadow) 277 goto bail_3; 278 279 return 0; 280 281 bail_3: 282 kfree(ppd->ccti_entries_shadow); 283 ppd->ccti_entries_shadow = NULL; 284 bail_2: 285 kfree(ppd->congestion_entries); 286 ppd->congestion_entries = NULL; 287 bail_1: 288 kfree(ppd->ccti_entries); 289 ppd->ccti_entries = NULL; 290 bail: 291 /* User is intentionally disabling the congestion control agent */ 292 if (!qib_cc_table_size) 293 return 0; 294 295 if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) { 296 qib_cc_table_size = 0; 297 qib_dev_err(dd, 298 "Congestion Control table size %d less than minimum %d for port %d\n", 299 qib_cc_table_size, IB_CCT_MIN_ENTRIES, port); 300 } 301 302 qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", 303 port); 304 return 0; 305 } 306 307 static int init_pioavailregs(struct qib_devdata *dd) 308 { 309 int ret, pidx; 310 u64 *status_page; 311 312 dd->pioavailregs_dma = dma_alloc_coherent( 313 
&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, 314 GFP_KERNEL); 315 if (!dd->pioavailregs_dma) { 316 qib_dev_err(dd, 317 "failed to allocate PIOavail reg area in memory\n"); 318 ret = -ENOMEM; 319 goto done; 320 } 321 322 /* 323 * We really want L2 cache aligned, but for current CPUs of 324 * interest, they are the same. 325 */ 326 status_page = (u64 *) 327 ((char *) dd->pioavailregs_dma + 328 ((2 * L1_CACHE_BYTES + 329 dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); 330 /* device status comes first, for backwards compatibility */ 331 dd->devstatusp = status_page; 332 *status_page++ = 0; 333 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 334 dd->pport[pidx].statusp = status_page; 335 *status_page++ = 0; 336 } 337 338 /* 339 * Setup buffer to hold freeze and other messages, accessible to 340 * apps, following statusp. This is per-unit, not per port. 341 */ 342 dd->freezemsg = (char *) status_page; 343 *dd->freezemsg = 0; 344 /* length of msg buffer is "whatever is left" */ 345 ret = (char *) status_page - (char *) dd->pioavailregs_dma; 346 dd->freezelen = PAGE_SIZE - ret; 347 348 ret = 0; 349 350 done: 351 return ret; 352 } 353 354 /** 355 * init_shadow_tids - allocate the shadow TID array 356 * @dd: the qlogic_ib device 357 * 358 * allocate the shadow TID array, so we can qib_munlock previous 359 * entries. It may make more sense to move the pageshadow to the 360 * ctxt data structure, so we only allocate memory for ctxts actually 361 * in use, since we are at 8k per ctxt now. 362 * We don't want failures here to prevent use of the driver/chip, 363 * so no return value. 364 */ 365 static void init_shadow_tids(struct qib_devdata *dd) 366 { 367 struct page **pages; 368 dma_addr_t *addrs; 369 370 pages = vzalloc(array_size(sizeof(struct page *), 371 dd->cfgctxts * dd->rcvtidcnt)); 372 if (!pages) 373 goto bail; 374 375 addrs = vzalloc(array_size(sizeof(dma_addr_t), 376 dd->cfgctxts * dd->rcvtidcnt)); 377 if (!addrs) 378 goto bail_free; 379 380 dd->pageshadow = pages; 381 dd->physshadow = addrs; 382 return; 383 384 bail_free: 385 vfree(pages); 386 bail: 387 dd->pageshadow = NULL; 388 } 389 390 /* 391 * Do initialization for device that is only needed on 392 * first detect, not on resets.
393 */ 394 static int loadtime_init(struct qib_devdata *dd) 395 { 396 int ret = 0; 397 398 if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & 399 QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) { 400 qib_dev_err(dd, 401 "Driver only handles version %d, chip swversion is %d (%llx), failing\n", 402 QIB_CHIP_SWVERSION, 403 (int)(dd->revision >> 404 QLOGIC_IB_R_SOFTWARE_SHIFT) & 405 QLOGIC_IB_R_SOFTWARE_MASK, 406 (unsigned long long) dd->revision); 407 ret = -ENOSYS; 408 goto done; 409 } 410 411 if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK) 412 qib_devinfo(dd->pcidev, "%s", dd->boardversion); 413 414 spin_lock_init(&dd->pioavail_lock); 415 spin_lock_init(&dd->sendctrl_lock); 416 spin_lock_init(&dd->uctxt_lock); 417 spin_lock_init(&dd->qib_diag_trans_lock); 418 spin_lock_init(&dd->eep_st_lock); 419 mutex_init(&dd->eep_lock); 420 421 if (qib_mini_init) 422 goto done; 423 424 ret = init_pioavailregs(dd); 425 init_shadow_tids(dd); 426 427 qib_get_eeprom_info(dd); 428 429 /* setup time (don't start yet) to verify we got interrupt */ 430 timer_setup(&dd->intrchk_timer, verify_interrupt, 0); 431 done: 432 return ret; 433 } 434 435 /** 436 * init_after_reset - re-initialize after a reset 437 * @dd: the qlogic_ib device 438 * 439 * sanity check at least some of the values after reset, and 440 * ensure no receive or transmit (explicitly, in case reset 441 * failed 442 */ 443 static int init_after_reset(struct qib_devdata *dd) 444 { 445 int i; 446 447 /* 448 * Ensure chip does no sends or receives, tail updates, or 449 * pioavail updates while we re-initialize. This is mostly 450 * for the driver data structures, not chip registers. 451 */ 452 for (i = 0; i < dd->num_pports; ++i) { 453 /* 454 * ctxt == -1 means "all contexts". Only really safe for 455 * _dis_abling things, as here. 456 */ 457 dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS | 458 QIB_RCVCTRL_INTRAVAIL_DIS | 459 QIB_RCVCTRL_TAILUPD_DIS, -1); 460 /* Redundant across ports for some, but no big deal. */ 461 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS | 462 QIB_SENDCTRL_AVAIL_DIS); 463 } 464 465 return 0; 466 } 467 468 static void enable_chip(struct qib_devdata *dd) 469 { 470 u64 rcvmask; 471 int i; 472 473 /* 474 * Enable PIO send, and update of PIOavail regs to memory. 475 */ 476 for (i = 0; i < dd->num_pports; ++i) 477 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB | 478 QIB_SENDCTRL_AVAIL_ENB); 479 /* 480 * Enable kernel ctxts' receive and receive interrupt. 481 * Other ctxts done as user opens and inits them. 482 */ 483 rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB; 484 rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ? 485 QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB; 486 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { 487 struct qib_ctxtdata *rcd = dd->rcd[i]; 488 489 if (rcd) 490 dd->f_rcvctrl(rcd->ppd, rcvmask, i); 491 } 492 } 493 494 static void verify_interrupt(struct timer_list *t) 495 { 496 struct qib_devdata *dd = from_timer(dd, t, intrchk_timer); 497 u64 int_counter; 498 499 if (!dd) 500 return; /* being torn down */ 501 502 /* 503 * If we don't have a lid or any interrupts, let the user know and 504 * don't bother checking again. 
505 */ 506 int_counter = qib_int_counter(dd) - dd->z_int_counter; 507 if (int_counter == 0) { 508 if (!dd->f_intr_fallback(dd)) 509 dev_err(&dd->pcidev->dev, 510 "No interrupts detected, not usable.\n"); 511 else /* re-arm the timer to see if fallback works */ 512 mod_timer(&dd->intrchk_timer, jiffies + HZ/2); 513 } 514 } 515 516 static void init_piobuf_state(struct qib_devdata *dd) 517 { 518 int i, pidx; 519 u32 uctxts; 520 521 /* 522 * Ensure all buffers are free, and fifos empty. Buffers 523 * are common, so only do once for port 0. 524 * 525 * After enable and qib_chg_pioavailkernel so we can safely 526 * enable pioavail updates and PIOENABLE. After this, packets 527 * are ready and able to go out. 528 */ 529 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL); 530 for (pidx = 0; pidx < dd->num_pports; ++pidx) 531 dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH); 532 533 /* 534 * If not all sendbufs are used, add the one to each of the lower 535 * numbered contexts. pbufsctxt and lastctxt_piobuf are 536 * calculated in chip-specific code because it may cause some 537 * chip-specific adjustments to be made. 538 */ 539 uctxts = dd->cfgctxts - dd->first_user_ctxt; 540 dd->ctxts_extrabuf = dd->pbufsctxt ? 541 dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0; 542 543 /* 544 * Set up the shadow copies of the piobufavail registers, 545 * which we compare against the chip registers for now, and 546 * the in memory DMA'ed copies of the registers. 547 * By now pioavail updates to memory should have occurred, so 548 * copy them into our working/shadow registers; this is in 549 * case something went wrong with abort, but mostly to get the 550 * initial values of the generation bit correct. 551 */ 552 for (i = 0; i < dd->pioavregs; i++) { 553 __le64 tmp; 554 555 tmp = dd->pioavailregs_dma[i]; 556 /* 557 * Don't need to worry about pioavailkernel here 558 * because we will call qib_chg_pioavailkernel() later 559 * in initialization, to busy out buffers as needed. 
560 */ 561 dd->pioavailshadow[i] = le64_to_cpu(tmp); 562 } 563 while (i < ARRAY_SIZE(dd->pioavailshadow)) 564 dd->pioavailshadow[i++] = 0; /* for debugging sanity */ 565 566 /* after pioavailshadow is setup */ 567 qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k, 568 TXCHK_CHG_TYPE_KERN, NULL); 569 dd->f_initvl15_bufs(dd); 570 } 571 572 /** 573 * qib_create_workqueues - create per port workqueues 574 * @dd: the qlogic_ib device 575 */ 576 static int qib_create_workqueues(struct qib_devdata *dd) 577 { 578 int pidx; 579 struct qib_pportdata *ppd; 580 581 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 582 ppd = dd->pport + pidx; 583 if (!ppd->qib_wq) { 584 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ 585 586 snprintf(wq_name, sizeof(wq_name), "qib%d_%d", 587 dd->unit, pidx); 588 ppd->qib_wq = alloc_ordered_workqueue(wq_name, 589 WQ_MEM_RECLAIM); 590 if (!ppd->qib_wq) 591 goto wq_error; 592 } 593 } 594 return 0; 595 wq_error: 596 pr_err("create_singlethread_workqueue failed for port %d\n", 597 pidx + 1); 598 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 599 ppd = dd->pport + pidx; 600 if (ppd->qib_wq) { 601 destroy_workqueue(ppd->qib_wq); 602 ppd->qib_wq = NULL; 603 } 604 } 605 return -ENOMEM; 606 } 607 608 static void qib_free_pportdata(struct qib_pportdata *ppd) 609 { 610 free_percpu(ppd->ibport_data.pmastats); 611 free_percpu(ppd->ibport_data.rvp.rc_acks); 612 free_percpu(ppd->ibport_data.rvp.rc_qacks); 613 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); 614 ppd->ibport_data.pmastats = NULL; 615 } 616 617 /** 618 * qib_init - do the actual initialization sequence on the chip 619 * @dd: the qlogic_ib device 620 * @reinit: reinitializing, so don't allocate new memory 621 * 622 * Do the actual initialization sequence on the chip. This is done 623 * both from the init routine called from the PCI infrastructure, and 624 * when we reset the chip, or detect that it was reset internally, 625 * or it's administratively re-enabled. 626 * 627 * Memory allocation here and in called routines is only done in 628 * the first case (reinit == 0). We have to be careful, because even 629 * without memory allocation, we need to re-write all the chip registers 630 * TIDs, etc. after the reset or enable has completed. 631 */ 632 int qib_init(struct qib_devdata *dd, int reinit) 633 { 634 int ret = 0, pidx, lastfail = 0; 635 u32 portok = 0; 636 unsigned i; 637 struct qib_ctxtdata *rcd; 638 struct qib_pportdata *ppd; 639 unsigned long flags; 640 641 /* Set linkstate to unknown, so we can watch for a transition. */ 642 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 643 ppd = dd->pport + pidx; 644 spin_lock_irqsave(&ppd->lflags_lock, flags); 645 ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED | 646 QIBL_LINKDOWN | QIBL_LINKINIT | 647 QIBL_LINKV); 648 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 649 } 650 651 if (reinit) 652 ret = init_after_reset(dd); 653 else 654 ret = loadtime_init(dd); 655 if (ret) 656 goto done; 657 658 /* Bypass most chip-init, to get to device creation */ 659 if (qib_mini_init) 660 return 0; 661 662 ret = dd->f_late_initreg(dd); 663 if (ret) 664 goto done; 665 666 /* dd->rcd can be NULL if early init failed */ 667 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { 668 /* 669 * Set up the (kernel) rcvhdr queue and egr TIDs. If doing 670 * re-init, the simplest way to handle this is to free 671 * existing, and re-allocate. 672 * Need to re-create rest of ctxt 0 ctxtdata as well. 
673 */ 674 rcd = dd->rcd[i]; 675 if (!rcd) 676 continue; 677 678 lastfail = qib_create_rcvhdrq(dd, rcd); 679 if (!lastfail) 680 lastfail = qib_setup_eagerbufs(rcd); 681 if (lastfail) 682 qib_dev_err(dd, 683 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); 684 } 685 686 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 687 int mtu; 688 689 if (lastfail) 690 ret = lastfail; 691 ppd = dd->pport + pidx; 692 mtu = ib_mtu_enum_to_int(qib_ibmtu); 693 if (mtu == -1) { 694 mtu = QIB_DEFAULT_MTU; 695 qib_ibmtu = 0; /* don't leave invalid value */ 696 } 697 /* set max we can ever have for this driver load */ 698 ppd->init_ibmaxlen = min(mtu > 2048 ? 699 dd->piosize4k : dd->piosize2k, 700 dd->rcvegrbufsize + 701 (dd->rcvhdrentsize << 2)); 702 /* 703 * Have to initialize ibmaxlen, but this will normally 704 * change immediately in qib_set_mtu(). 705 */ 706 ppd->ibmaxlen = ppd->init_ibmaxlen; 707 qib_set_mtu(ppd, mtu); 708 709 spin_lock_irqsave(&ppd->lflags_lock, flags); 710 ppd->lflags |= QIBL_IB_LINK_DISABLED; 711 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 712 713 lastfail = dd->f_bringup_serdes(ppd); 714 if (lastfail) { 715 qib_devinfo(dd->pcidev, 716 "Failed to bringup IB port %u\n", ppd->port); 717 lastfail = -ENETDOWN; 718 continue; 719 } 720 721 portok++; 722 } 723 724 if (!portok) { 725 /* none of the ports initialized */ 726 if (!ret && lastfail) 727 ret = lastfail; 728 else if (!ret) 729 ret = -ENETDOWN; 730 /* but continue on, so we can debug cause */ 731 } 732 733 enable_chip(dd); 734 735 init_piobuf_state(dd); 736 737 done: 738 if (!ret) { 739 /* chip is OK for user apps; mark it as initialized */ 740 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 741 ppd = dd->pport + pidx; 742 /* 743 * Set status even if port serdes is not initialized 744 * so that diags will work. 745 */ 746 *ppd->statusp |= QIB_STATUS_CHIP_PRESENT | 747 QIB_STATUS_INITTED; 748 if (!ppd->link_speed_enabled) 749 continue; 750 if (dd->flags & QIB_HAS_SEND_DMA) 751 ret = qib_setup_sdma(ppd); 752 timer_setup(&ppd->hol_timer, qib_hol_event, 0); 753 ppd->hol_state = QIB_HOL_UP; 754 } 755 756 /* now we can enable all interrupts from the chip */ 757 dd->f_set_intr_state(dd, 1); 758 759 /* 760 * Setup to verify we get an interrupt, and fallback 761 * to an alternate if necessary and possible. 762 */ 763 mod_timer(&dd->intrchk_timer, jiffies + HZ/2); 764 /* start stats retrieval timer */ 765 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); 766 } 767 768 /* if ret is non-zero, we probably should do some cleanup here... */ 769 return ret; 770 } 771 772 /* 773 * These next two routines are placeholders in case we don't have per-arch 774 * code for controlling write combining. If explicit control of write 775 * combining is not available, performance will probably be awful. 776 */ 777 778 int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd) 779 { 780 return -EOPNOTSUPP; 781 } 782 783 void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd) 784 { 785 } 786 787 struct qib_devdata *qib_lookup(int unit) 788 { 789 return xa_load(&qib_dev_table, unit); 790 } 791 792 /* 793 * Stop the timers during unit shutdown, or after an error late 794 * in initialization. 
795 */ 796 static void qib_stop_timers(struct qib_devdata *dd) 797 { 798 struct qib_pportdata *ppd; 799 int pidx; 800 801 if (dd->stats_timer.function) 802 del_timer_sync(&dd->stats_timer); 803 if (dd->intrchk_timer.function) 804 del_timer_sync(&dd->intrchk_timer); 805 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 806 ppd = dd->pport + pidx; 807 if (ppd->hol_timer.function) 808 del_timer_sync(&ppd->hol_timer); 809 if (ppd->led_override_timer.function) { 810 del_timer_sync(&ppd->led_override_timer); 811 atomic_set(&ppd->led_override_timer_active, 0); 812 } 813 if (ppd->symerr_clear_timer.function) 814 del_timer_sync(&ppd->symerr_clear_timer); 815 } 816 } 817 818 /** 819 * qib_shutdown_device - shut down a device 820 * @dd: the qlogic_ib device 821 * 822 * This is called to make the device quiet when we are about to 823 * unload the driver, and also when the device is administratively 824 * disabled. It does not free any data structures. 825 * Everything it does has to be setup again by qib_init(dd, 1) 826 */ 827 static void qib_shutdown_device(struct qib_devdata *dd) 828 { 829 struct qib_pportdata *ppd; 830 unsigned pidx; 831 832 if (dd->flags & QIB_SHUTDOWN) 833 return; 834 dd->flags |= QIB_SHUTDOWN; 835 836 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 837 ppd = dd->pport + pidx; 838 839 spin_lock_irq(&ppd->lflags_lock); 840 ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT | 841 QIBL_LINKARMED | QIBL_LINKACTIVE | 842 QIBL_LINKV); 843 spin_unlock_irq(&ppd->lflags_lock); 844 *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY); 845 } 846 dd->flags &= ~QIB_INITTED; 847 848 /* mask interrupts, but not errors */ 849 dd->f_set_intr_state(dd, 0); 850 851 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 852 ppd = dd->pport + pidx; 853 dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS | 854 QIB_RCVCTRL_CTXT_DIS | 855 QIB_RCVCTRL_INTRAVAIL_DIS | 856 QIB_RCVCTRL_PKEY_ENB, -1); 857 /* 858 * Gracefully stop all sends allowing any in progress to 859 * trickle out first. 860 */ 861 dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR); 862 } 863 864 /* 865 * Enough for anything that's going to trickle out to have actually 866 * done so. 867 */ 868 udelay(20); 869 870 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 871 ppd = dd->pport + pidx; 872 dd->f_setextled(ppd, 0); /* make sure LEDs are off */ 873 874 if (dd->flags & QIB_HAS_SEND_DMA) 875 qib_teardown_sdma(ppd); 876 877 dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS | 878 QIB_SENDCTRL_SEND_DIS); 879 /* 880 * Clear SerdesEnable. 881 * We can't count on interrupts since we are stopping. 882 */ 883 dd->f_quiet_serdes(ppd); 884 885 if (ppd->qib_wq) { 886 destroy_workqueue(ppd->qib_wq); 887 ppd->qib_wq = NULL; 888 } 889 qib_free_pportdata(ppd); 890 } 891 892 } 893 894 /** 895 * qib_free_ctxtdata - free a context's allocated data 896 * @dd: the qlogic_ib device 897 * @rcd: the ctxtdata structure 898 * 899 * free up any allocated data for a context 900 * This should not touch anything that would affect a simultaneous 901 * re-allocation of context data, because it is called after qib_mutex 902 * is released (and can be called from reinit as well). 903 * It should never change any chip state, or global driver state. 
904 */ 905 void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) 906 { 907 if (!rcd) 908 return; 909 910 if (rcd->rcvhdrq) { 911 dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size, 912 rcd->rcvhdrq, rcd->rcvhdrq_phys); 913 rcd->rcvhdrq = NULL; 914 if (rcd->rcvhdrtail_kvaddr) { 915 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 916 rcd->rcvhdrtail_kvaddr, 917 rcd->rcvhdrqtailaddr_phys); 918 rcd->rcvhdrtail_kvaddr = NULL; 919 } 920 } 921 if (rcd->rcvegrbuf) { 922 unsigned e; 923 924 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { 925 void *base = rcd->rcvegrbuf[e]; 926 size_t size = rcd->rcvegrbuf_size; 927 928 dma_free_coherent(&dd->pcidev->dev, size, 929 base, rcd->rcvegrbuf_phys[e]); 930 } 931 kfree(rcd->rcvegrbuf); 932 rcd->rcvegrbuf = NULL; 933 kfree(rcd->rcvegrbuf_phys); 934 rcd->rcvegrbuf_phys = NULL; 935 rcd->rcvegrbuf_chunks = 0; 936 } 937 938 kfree(rcd->tid_pg_list); 939 vfree(rcd->user_event_mask); 940 vfree(rcd->subctxt_uregbase); 941 vfree(rcd->subctxt_rcvegrbuf); 942 vfree(rcd->subctxt_rcvhdr_base); 943 #ifdef CONFIG_DEBUG_FS 944 kfree(rcd->opstats); 945 rcd->opstats = NULL; 946 #endif 947 kfree(rcd); 948 } 949 950 /* 951 * Perform a PIO buffer bandwidth write test, to verify proper system 952 * configuration. Even when all the setup calls work, occasionally 953 * BIOS or other issues can prevent write combining from working, or 954 * can cause other bandwidth problems to the chip. 955 * 956 * This test simply writes the same buffer over and over again, and 957 * measures close to the peak bandwidth to the chip (not testing 958 * data bandwidth to the wire). On chips that use an address-based 959 * trigger to send packets to the wire, this is easy. On chips that 960 * use a count to trigger, we want to make sure that the packet doesn't 961 * go out on the wire, or trigger flow control checks. 962 */ 963 static void qib_verify_pioperf(struct qib_devdata *dd) 964 { 965 u32 pbnum, cnt, lcnt; 966 u32 __iomem *piobuf; 967 u32 *addr; 968 u64 msecs, emsecs; 969 970 piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum); 971 if (!piobuf) { 972 qib_devinfo(dd->pcidev, 973 "No PIObufs for checking perf, skipping\n"); 974 return; 975 } 976 977 /* 978 * Enough to give us a reasonable test, less than piobuf size, and 979 * likely multiple of store buffer length. 980 */ 981 cnt = 1024; 982 983 addr = vmalloc(cnt); 984 if (!addr) 985 goto done; 986 987 preempt_disable(); /* we want reasonably accurate elapsed time */ 988 msecs = 1 + jiffies_to_msecs(jiffies); 989 for (lcnt = 0; lcnt < 10000U; lcnt++) { 990 /* wait until we cross msec boundary */ 991 if (jiffies_to_msecs(jiffies) >= msecs) 992 break; 993 udelay(1); 994 } 995 996 dd->f_set_armlaunch(dd, 0); 997 998 /* 999 * length 0, no dwords actually sent 1000 */ 1001 writeq(0, piobuf); 1002 qib_flush_wc(); 1003 1004 /* 1005 * This is only roughly accurate, since even with preempt we 1006 * still take interrupts that could take a while. Running for 1007 * >= 5 msec seems to get us "close enough" to accurate values. 
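 * Each pass of the copy loop below moves the full 1024-byte test buffer, so lcnt/emsecs approximates the transfer rate in MiB/sec; the lcnt < (emsecs * 1024U) test that follows therefore flags any observed rate below roughly 1 GiB/sec.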
1008 */ 1009 msecs = jiffies_to_msecs(jiffies); 1010 for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) { 1011 qib_pio_copy(piobuf + 64, addr, cnt >> 2); 1012 emsecs = jiffies_to_msecs(jiffies) - msecs; 1013 } 1014 1015 /* 1 GiB/sec, slightly over IB SDR line rate */ 1016 if (lcnt < (emsecs * 1024U)) 1017 qib_dev_err(dd, 1018 "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n", 1019 lcnt / (u32) emsecs); 1020 1021 preempt_enable(); 1022 1023 vfree(addr); 1024 1025 done: 1026 /* disarm piobuf, so it's available again */ 1027 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum)); 1028 qib_sendbuf_done(dd, pbnum); 1029 dd->f_set_armlaunch(dd, 1); 1030 } 1031 1032 void qib_free_devdata(struct qib_devdata *dd) 1033 { 1034 unsigned long flags; 1035 1036 xa_lock_irqsave(&qib_dev_table, flags); 1037 __xa_erase(&qib_dev_table, dd->unit); 1038 xa_unlock_irqrestore(&qib_dev_table, flags); 1039 1040 #ifdef CONFIG_DEBUG_FS 1041 qib_dbg_ibdev_exit(&dd->verbs_dev); 1042 #endif 1043 free_percpu(dd->int_counter); 1044 rvt_dealloc_device(&dd->verbs_dev.rdi); 1045 } 1046 1047 u64 qib_int_counter(struct qib_devdata *dd) 1048 { 1049 int cpu; 1050 u64 int_counter = 0; 1051 1052 for_each_possible_cpu(cpu) 1053 int_counter += *per_cpu_ptr(dd->int_counter, cpu); 1054 return int_counter; 1055 } 1056 1057 u64 qib_sps_ints(void) 1058 { 1059 unsigned long index, flags; 1060 struct qib_devdata *dd; 1061 u64 sps_ints = 0; 1062 1063 xa_lock_irqsave(&qib_dev_table, flags); 1064 xa_for_each(&qib_dev_table, index, dd) { 1065 sps_ints += qib_int_counter(dd); 1066 } 1067 xa_unlock_irqrestore(&qib_dev_table, flags); 1068 return sps_ints; 1069 } 1070 1071 /* 1072 * Allocate our primary per-unit data structure. Must be done via verbs 1073 * allocator, because the verbs cleanup process both does cleanup and 1074 * free of the data structure. 1075 * "extra" is for chip-specific data. 1076 */ 1077 struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) 1078 { 1079 struct qib_devdata *dd; 1080 int ret, nports; 1081 1082 /* extra is * number of ports */ 1083 nports = extra / sizeof(struct qib_pportdata); 1084 dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra, 1085 nports); 1086 if (!dd) 1087 return ERR_PTR(-ENOMEM); 1088 1089 ret = xa_alloc_irq(&qib_dev_table, &dd->unit, dd, xa_limit_32b, 1090 GFP_KERNEL); 1091 if (ret < 0) { 1092 qib_early_err(&pdev->dev, 1093 "Could not allocate unit ID: error %d\n", -ret); 1094 goto bail; 1095 } 1096 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit); 1097 1098 dd->int_counter = alloc_percpu(u64); 1099 if (!dd->int_counter) { 1100 ret = -ENOMEM; 1101 qib_early_err(&pdev->dev, 1102 "Could not allocate per-cpu int_counter\n"); 1103 goto bail; 1104 } 1105 1106 if (!qib_cpulist_count) { 1107 u32 count = num_online_cpus(); 1108 1109 qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long), 1110 GFP_KERNEL); 1111 if (qib_cpulist) 1112 qib_cpulist_count = count; 1113 } 1114 #ifdef CONFIG_DEBUG_FS 1115 qib_dbg_ibdev_init(&dd->verbs_dev); 1116 #endif 1117 return dd; 1118 bail: 1119 if (!list_empty(&dd->list)) 1120 list_del_init(&dd->list); 1121 rvt_dealloc_device(&dd->verbs_dev.rdi); 1122 return ERR_PTR(ret); 1123 } 1124 1125 /* 1126 * Called from freeze mode handlers, and from PCI error 1127 * reporting code. Should be paranoid about state of 1128 * system and data structures. 
1129 */ 1130 void qib_disable_after_error(struct qib_devdata *dd) 1131 { 1132 if (dd->flags & QIB_INITTED) { 1133 u32 pidx; 1134 1135 dd->flags &= ~QIB_INITTED; 1136 if (dd->pport) 1137 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1138 struct qib_pportdata *ppd; 1139 1140 ppd = dd->pport + pidx; 1141 if (dd->flags & QIB_PRESENT) { 1142 qib_set_linkstate(ppd, 1143 QIB_IB_LINKDOWN_DISABLE); 1144 dd->f_setextled(ppd, 0); 1145 } 1146 *ppd->statusp &= ~QIB_STATUS_IB_READY; 1147 } 1148 } 1149 1150 /* 1151 * Mark as having had an error for driver, and also 1152 * for /sys and status word mapped to user programs. 1153 * This marks unit as not usable, until reset. 1154 */ 1155 if (dd->devstatusp) 1156 *dd->devstatusp |= QIB_STATUS_HWERROR; 1157 } 1158 1159 static void qib_remove_one(struct pci_dev *); 1160 static int qib_init_one(struct pci_dev *, const struct pci_device_id *); 1161 static void qib_shutdown_one(struct pci_dev *); 1162 1163 #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: " 1164 #define PFX QIB_DRV_NAME ": " 1165 1166 static const struct pci_device_id qib_pci_tbl[] = { 1167 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) }, 1168 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) }, 1169 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) }, 1170 { 0, } 1171 }; 1172 1173 MODULE_DEVICE_TABLE(pci, qib_pci_tbl); 1174 1175 static struct pci_driver qib_driver = { 1176 .name = QIB_DRV_NAME, 1177 .probe = qib_init_one, 1178 .remove = qib_remove_one, 1179 .shutdown = qib_shutdown_one, 1180 .id_table = qib_pci_tbl, 1181 .err_handler = &qib_pci_err_handler, 1182 }; 1183 1184 #ifdef CONFIG_INFINIBAND_QIB_DCA 1185 1186 static int qib_notify_dca(struct notifier_block *, unsigned long, void *); 1187 static struct notifier_block dca_notifier = { 1188 .notifier_call = qib_notify_dca, 1189 .next = NULL, 1190 .priority = 0 1191 }; 1192 1193 static int qib_notify_dca_device(struct device *device, void *data) 1194 { 1195 struct qib_devdata *dd = dev_get_drvdata(device); 1196 unsigned long event = *(unsigned long *)data; 1197 1198 return dd->f_notify_dca(dd, event); 1199 } 1200 1201 static int qib_notify_dca(struct notifier_block *nb, unsigned long event, 1202 void *p) 1203 { 1204 int rval; 1205 1206 rval = driver_for_each_device(&qib_driver.driver, NULL, 1207 &event, qib_notify_dca_device); 1208 return rval ? NOTIFY_BAD : NOTIFY_DONE; 1209 } 1210 1211 #endif 1212 1213 /* 1214 * Do all the generic driver unit- and chip-independent memory 1215 * allocation and initialization. 1216 */ 1217 static int __init qib_ib_init(void) 1218 { 1219 int ret; 1220 1221 ret = qib_dev_init(); 1222 if (ret) 1223 goto bail; 1224 1225 /* 1226 * These must be called before the driver is registered with 1227 * the PCI subsystem. 
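 * (the PCI core may call qib_init_one() as soon as pci_register_driver() returns, so the debugfs root and the DCA notifier have to be in place before the driver is registered).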
1228 */ 1229 #ifdef CONFIG_INFINIBAND_QIB_DCA 1230 dca_register_notify(&dca_notifier); 1231 #endif 1232 #ifdef CONFIG_DEBUG_FS 1233 qib_dbg_init(); 1234 #endif 1235 ret = pci_register_driver(&qib_driver); 1236 if (ret < 0) { 1237 pr_err("Unable to register driver: error %d\n", -ret); 1238 goto bail_dev; 1239 } 1240 1241 /* not fatal if it doesn't work */ 1242 if (qib_init_qibfs()) 1243 pr_err("Unable to register ipathfs\n"); 1244 goto bail; /* all OK */ 1245 1246 bail_dev: 1247 #ifdef CONFIG_INFINIBAND_QIB_DCA 1248 dca_unregister_notify(&dca_notifier); 1249 #endif 1250 #ifdef CONFIG_DEBUG_FS 1251 qib_dbg_exit(); 1252 #endif 1253 qib_dev_cleanup(); 1254 bail: 1255 return ret; 1256 } 1257 1258 module_init(qib_ib_init); 1259 1260 /* 1261 * Do the non-unit driver cleanup, memory free, etc. at unload. 1262 */ 1263 static void __exit qib_ib_cleanup(void) 1264 { 1265 int ret; 1266 1267 ret = qib_exit_qibfs(); 1268 if (ret) 1269 pr_err( 1270 "Unable to cleanup counter filesystem: error %d\n", 1271 -ret); 1272 1273 #ifdef CONFIG_INFINIBAND_QIB_DCA 1274 dca_unregister_notify(&dca_notifier); 1275 #endif 1276 pci_unregister_driver(&qib_driver); 1277 #ifdef CONFIG_DEBUG_FS 1278 qib_dbg_exit(); 1279 #endif 1280 1281 qib_cpulist_count = 0; 1282 kfree(qib_cpulist); 1283 1284 WARN_ON(!xa_empty(&qib_dev_table)); 1285 qib_dev_cleanup(); 1286 } 1287 1288 module_exit(qib_ib_cleanup); 1289 1290 /* this can only be called after a successful initialization */ 1291 static void cleanup_device_data(struct qib_devdata *dd) 1292 { 1293 int ctxt; 1294 int pidx; 1295 struct qib_ctxtdata **tmp; 1296 unsigned long flags; 1297 1298 /* users can't do anything more with chip */ 1299 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1300 if (dd->pport[pidx].statusp) 1301 *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; 1302 1303 spin_lock(&dd->pport[pidx].cc_shadow_lock); 1304 1305 kfree(dd->pport[pidx].congestion_entries); 1306 dd->pport[pidx].congestion_entries = NULL; 1307 kfree(dd->pport[pidx].ccti_entries); 1308 dd->pport[pidx].ccti_entries = NULL; 1309 kfree(dd->pport[pidx].ccti_entries_shadow); 1310 dd->pport[pidx].ccti_entries_shadow = NULL; 1311 kfree(dd->pport[pidx].congestion_entries_shadow); 1312 dd->pport[pidx].congestion_entries_shadow = NULL; 1313 1314 spin_unlock(&dd->pport[pidx].cc_shadow_lock); 1315 } 1316 1317 qib_disable_wc(dd); 1318 1319 if (dd->pioavailregs_dma) { 1320 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 1321 (void *) dd->pioavailregs_dma, 1322 dd->pioavailregs_phys); 1323 dd->pioavailregs_dma = NULL; 1324 } 1325 1326 if (dd->pageshadow) { 1327 struct page **tmpp = dd->pageshadow; 1328 dma_addr_t *tmpd = dd->physshadow; 1329 int i; 1330 1331 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { 1332 int ctxt_tidbase = ctxt * dd->rcvtidcnt; 1333 int maxtid = ctxt_tidbase + dd->rcvtidcnt; 1334 1335 for (i = ctxt_tidbase; i < maxtid; i++) { 1336 if (!tmpp[i]) 1337 continue; 1338 pci_unmap_page(dd->pcidev, tmpd[i], 1339 PAGE_SIZE, PCI_DMA_FROMDEVICE); 1340 qib_release_user_pages(&tmpp[i], 1); 1341 tmpp[i] = NULL; 1342 } 1343 } 1344 1345 dd->pageshadow = NULL; 1346 vfree(tmpp); 1347 dd->physshadow = NULL; 1348 vfree(tmpd); 1349 } 1350 1351 /* 1352 * Free any resources still in use (usually just kernel contexts) 1353 * at unload; we do for ctxtcnt, because that's what we allocate. 1354 * We acquire lock to be really paranoid that rcd isn't being 1355 * accessed from some interrupt-related code (that should not happen, 1356 * but best to be sure). 
1357 */ 1358 spin_lock_irqsave(&dd->uctxt_lock, flags); 1359 tmp = dd->rcd; 1360 dd->rcd = NULL; 1361 spin_unlock_irqrestore(&dd->uctxt_lock, flags); 1362 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { 1363 struct qib_ctxtdata *rcd = tmp[ctxt]; 1364 1365 tmp[ctxt] = NULL; /* debugging paranoia */ 1366 qib_free_ctxtdata(dd, rcd); 1367 } 1368 kfree(tmp); 1369 } 1370 1371 /* 1372 * Clean up on unit shutdown, or error during unit load after 1373 * successful initialization. 1374 */ 1375 static void qib_postinit_cleanup(struct qib_devdata *dd) 1376 { 1377 /* 1378 * Clean up chip-specific stuff. 1379 * We check for NULL here, because it's outside 1380 * the kregbase check, and we need to call it 1381 * after the free_irq. Thus it's possible that 1382 * the function pointers were never initialized. 1383 */ 1384 if (dd->f_cleanup) 1385 dd->f_cleanup(dd); 1386 1387 qib_pcie_ddcleanup(dd); 1388 1389 cleanup_device_data(dd); 1390 1391 qib_free_devdata(dd); 1392 } 1393 1394 static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1395 { 1396 int ret, j, pidx, initfail; 1397 struct qib_devdata *dd = NULL; 1398 1399 ret = qib_pcie_init(pdev, ent); 1400 if (ret) 1401 goto bail; 1402 1403 /* 1404 * Do device-specific initialization, function table setup, dd 1405 * allocation, etc. 1406 */ 1407 switch (ent->device) { 1408 case PCI_DEVICE_ID_QLOGIC_IB_6120: 1409 #ifdef CONFIG_PCI_MSI 1410 dd = qib_init_iba6120_funcs(pdev, ent); 1411 #else 1412 qib_early_err(&pdev->dev, 1413 "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", 1414 ent->device); 1415 dd = ERR_PTR(-ENODEV); 1416 #endif 1417 break; 1418 1419 case PCI_DEVICE_ID_QLOGIC_IB_7220: 1420 dd = qib_init_iba7220_funcs(pdev, ent); 1421 break; 1422 1423 case PCI_DEVICE_ID_QLOGIC_IB_7322: 1424 dd = qib_init_iba7322_funcs(pdev, ent); 1425 break; 1426 1427 default: 1428 qib_early_err(&pdev->dev, 1429 "Failing on unknown Intel deviceid 0x%x\n", 1430 ent->device); 1431 ret = -ENODEV; 1432 } 1433 1434 if (IS_ERR(dd)) 1435 ret = PTR_ERR(dd); 1436 if (ret) 1437 goto bail; /* error already printed */ 1438 1439 ret = qib_create_workqueues(dd); 1440 if (ret) 1441 goto bail; 1442 1443 /* do the generic initialization */ 1444 initfail = qib_init(dd, 0); 1445 1446 ret = qib_register_ib_device(dd); 1447 1448 /* 1449 * Now ready for use. This should be cleared whenever we 1450 * detect a reset, or initiate one. If earlier failure, 1451 * we still create devices, so diags, etc. can be used 1452 * to determine cause of problem.
1453 */ 1454 if (!qib_mini_init && !initfail && !ret) 1455 dd->flags |= QIB_INITTED; 1456 1457 j = qib_device_create(dd); 1458 if (j) 1459 qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j); 1460 j = qibfs_add(dd); 1461 if (j) 1462 qib_dev_err(dd, "Failed filesystem setup for counters: %d\n", 1463 -j); 1464 1465 if (qib_mini_init || initfail || ret) { 1466 qib_stop_timers(dd); 1467 flush_workqueue(ib_wq); 1468 for (pidx = 0; pidx < dd->num_pports; ++pidx) 1469 dd->f_quiet_serdes(dd->pport + pidx); 1470 if (qib_mini_init) 1471 goto bail; 1472 if (!j) { 1473 (void) qibfs_remove(dd); 1474 qib_device_remove(dd); 1475 } 1476 if (!ret) 1477 qib_unregister_ib_device(dd); 1478 qib_postinit_cleanup(dd); 1479 if (initfail) 1480 ret = initfail; 1481 goto bail; 1482 } 1483 1484 ret = qib_enable_wc(dd); 1485 if (ret) { 1486 qib_dev_err(dd, 1487 "Write combining not enabled (err %d): performance may be poor\n", 1488 -ret); 1489 ret = 0; 1490 } 1491 1492 qib_verify_pioperf(dd); 1493 bail: 1494 return ret; 1495 } 1496 1497 static void qib_remove_one(struct pci_dev *pdev) 1498 { 1499 struct qib_devdata *dd = pci_get_drvdata(pdev); 1500 int ret; 1501 1502 /* unregister from IB core */ 1503 qib_unregister_ib_device(dd); 1504 1505 /* 1506 * Disable the IB link, disable interrupts on the device, 1507 * clear dma engines, etc. 1508 */ 1509 if (!qib_mini_init) 1510 qib_shutdown_device(dd); 1511 1512 qib_stop_timers(dd); 1513 1514 /* wait until all of our (qsfp) queue_work() calls complete */ 1515 flush_workqueue(ib_wq); 1516 1517 ret = qibfs_remove(dd); 1518 if (ret) 1519 qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n", 1520 -ret); 1521 1522 qib_device_remove(dd); 1523 1524 qib_postinit_cleanup(dd); 1525 } 1526 1527 static void qib_shutdown_one(struct pci_dev *pdev) 1528 { 1529 struct qib_devdata *dd = pci_get_drvdata(pdev); 1530 1531 qib_shutdown_device(dd); 1532 } 1533 1534 /** 1535 * qib_create_rcvhdrq - create a receive header queue 1536 * @dd: the qlogic_ib device 1537 * @rcd: the context data 1538 * 1539 * This must be contiguous memory (from an i/o perspective), and must be 1540 * DMA'able (which means for some systems, it will go through an IOMMU, 1541 * or be forced into a low address range). 1542 */ 1543 int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) 1544 { 1545 unsigned amt; 1546 int old_node_id; 1547 1548 if (!rcd->rcvhdrq) { 1549 dma_addr_t phys_hdrqtail; 1550 gfp_t gfp_flags; 1551 1552 amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * 1553 sizeof(u32), PAGE_SIZE); 1554 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? 
1555 GFP_USER : GFP_KERNEL; 1556 1557 old_node_id = dev_to_node(&dd->pcidev->dev); 1558 set_dev_node(&dd->pcidev->dev, rcd->node_id); 1559 rcd->rcvhdrq = dma_alloc_coherent( 1560 &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, 1561 gfp_flags | __GFP_COMP); 1562 set_dev_node(&dd->pcidev->dev, old_node_id); 1563 1564 if (!rcd->rcvhdrq) { 1565 qib_dev_err(dd, 1566 "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", 1567 amt, rcd->ctxt); 1568 goto bail; 1569 } 1570 1571 if (rcd->ctxt >= dd->first_user_ctxt) { 1572 rcd->user_event_mask = vmalloc_user(PAGE_SIZE); 1573 if (!rcd->user_event_mask) 1574 goto bail_free_hdrq; 1575 } 1576 1577 if (!(dd->flags & QIB_NODMA_RTAIL)) { 1578 set_dev_node(&dd->pcidev->dev, rcd->node_id); 1579 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent( 1580 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, 1581 gfp_flags); 1582 set_dev_node(&dd->pcidev->dev, old_node_id); 1583 if (!rcd->rcvhdrtail_kvaddr) 1584 goto bail_free; 1585 rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; 1586 } 1587 1588 rcd->rcvhdrq_size = amt; 1589 } 1590 1591 /* clear for security and sanity on each use */ 1592 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size); 1593 if (rcd->rcvhdrtail_kvaddr) 1594 memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE); 1595 return 0; 1596 1597 bail_free: 1598 qib_dev_err(dd, 1599 "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", 1600 rcd->ctxt); 1601 vfree(rcd->user_event_mask); 1602 rcd->user_event_mask = NULL; 1603 bail_free_hdrq: 1604 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, 1605 rcd->rcvhdrq_phys); 1606 rcd->rcvhdrq = NULL; 1607 bail: 1608 return -ENOMEM; 1609 } 1610 1611 /** 1612 * allocate eager buffers, both kernel and user contexts. 1613 * @rcd: the context we are setting up. 1614 * 1615 * Allocate the eager TID buffers and program them into hip. 1616 * They are no longer completely contiguous, we do multiple allocation 1617 * calls. Otherwise we get the OOM code involved, by asking for too 1618 * much per call, with disastrous results on some kernels. 1619 */ 1620 int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) 1621 { 1622 struct qib_devdata *dd = rcd->dd; 1623 unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; 1624 size_t size; 1625 gfp_t gfp_flags; 1626 int old_node_id; 1627 1628 /* 1629 * GFP_USER, but without GFP_FS, so buffer cache can be 1630 * coalesced (we hope); otherwise, even at order 4, 1631 * heavy filesystem activity makes these fail, and we can 1632 * use compound pages. 
1633 */ 1634 gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; 1635 1636 egrcnt = rcd->rcvegrcnt; 1637 egroff = rcd->rcvegr_tid_base; 1638 egrsize = dd->rcvegrbufsize; 1639 1640 chunk = rcd->rcvegrbuf_chunks; 1641 egrperchunk = rcd->rcvegrbufs_perchunk; 1642 size = rcd->rcvegrbuf_size; 1643 if (!rcd->rcvegrbuf) { 1644 rcd->rcvegrbuf = 1645 kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]), 1646 GFP_KERNEL, rcd->node_id); 1647 if (!rcd->rcvegrbuf) 1648 goto bail; 1649 } 1650 if (!rcd->rcvegrbuf_phys) { 1651 rcd->rcvegrbuf_phys = 1652 kmalloc_array_node(chunk, 1653 sizeof(rcd->rcvegrbuf_phys[0]), 1654 GFP_KERNEL, rcd->node_id); 1655 if (!rcd->rcvegrbuf_phys) 1656 goto bail_rcvegrbuf; 1657 } 1658 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { 1659 if (rcd->rcvegrbuf[e]) 1660 continue; 1661 1662 old_node_id = dev_to_node(&dd->pcidev->dev); 1663 set_dev_node(&dd->pcidev->dev, rcd->node_id); 1664 rcd->rcvegrbuf[e] = 1665 dma_alloc_coherent(&dd->pcidev->dev, size, 1666 &rcd->rcvegrbuf_phys[e], 1667 gfp_flags); 1668 set_dev_node(&dd->pcidev->dev, old_node_id); 1669 if (!rcd->rcvegrbuf[e]) 1670 goto bail_rcvegrbuf_phys; 1671 } 1672 1673 rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0]; 1674 1675 for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) { 1676 dma_addr_t pa = rcd->rcvegrbuf_phys[chunk]; 1677 unsigned i; 1678 1679 /* clear for security and sanity on each use */ 1680 memset(rcd->rcvegrbuf[chunk], 0, size); 1681 1682 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { 1683 dd->f_put_tid(dd, e + egroff + 1684 (u64 __iomem *) 1685 ((char __iomem *) 1686 dd->kregbase + 1687 dd->rcvegrbase), 1688 RCVHQ_RCV_TYPE_EAGER, pa); 1689 pa += egrsize; 1690 } 1691 cond_resched(); /* don't hog the cpu */ 1692 } 1693 1694 return 0; 1695 1696 bail_rcvegrbuf_phys: 1697 for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++) 1698 dma_free_coherent(&dd->pcidev->dev, size, 1699 rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]); 1700 kfree(rcd->rcvegrbuf_phys); 1701 rcd->rcvegrbuf_phys = NULL; 1702 bail_rcvegrbuf: 1703 kfree(rcd->rcvegrbuf); 1704 rcd->rcvegrbuf = NULL; 1705 bail: 1706 return -ENOMEM; 1707 } 1708 1709 /* 1710 * Note: Changes to this routine should be mirrored 1711 * for the diagnostics routine qib_remap_ioaddr32(). 1712 * There is also related code for VL15 buffers in qib_init_7322_variables(). 1713 * The teardown code that unmaps is in qib_pcie_ddcleanup() 1714 */ 1715 int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) 1716 { 1717 u64 __iomem *qib_kregbase = NULL; 1718 void __iomem *qib_piobase = NULL; 1719 u64 __iomem *qib_userbase = NULL; 1720 u64 qib_kreglen; 1721 u64 qib_pio2koffset = dd->piobufbase & 0xffffffff; 1722 u64 qib_pio4koffset = dd->piobufbase >> 32; 1723 u64 qib_pio2klen = dd->piobcnt2k * dd->palign; 1724 u64 qib_pio4klen = dd->piobcnt4k * dd->align4k; 1725 u64 qib_physaddr = dd->physaddr; 1726 u64 qib_piolen; 1727 u64 qib_userlen = 0; 1728 1729 /* 1730 * Free the old mapping because the kernel will try to reuse the 1731 * old mapping and not create a new mapping with the 1732 * write combining attribute. 
1733 */ 1734 iounmap(dd->kregbase); 1735 dd->kregbase = NULL; 1736 1737 /* 1738 * Assumes chip address space looks like: 1739 * - kregs + sregs + cregs + uregs (in any order) 1740 * - piobufs (2K and 4K bufs in either order) 1741 * or: 1742 * - kregs + sregs + cregs (in any order) 1743 * - piobufs (2K and 4K bufs in either order) 1744 * - uregs 1745 */ 1746 if (dd->piobcnt4k == 0) { 1747 qib_kreglen = qib_pio2koffset; 1748 qib_piolen = qib_pio2klen; 1749 } else if (qib_pio2koffset < qib_pio4koffset) { 1750 qib_kreglen = qib_pio2koffset; 1751 qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen; 1752 } else { 1753 qib_kreglen = qib_pio4koffset; 1754 qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen; 1755 } 1756 qib_piolen += vl15buflen; 1757 /* Map just the configured ports (not all hw ports) */ 1758 if (dd->uregbase > qib_kreglen) 1759 qib_userlen = dd->ureg_align * dd->cfgctxts; 1760 1761 /* Sanity checks passed, now create the new mappings */ 1762 qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen); 1763 if (!qib_kregbase) 1764 goto bail; 1765 1766 qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen); 1767 if (!qib_piobase) 1768 goto bail_kregbase; 1769 1770 if (qib_userlen) { 1771 qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, 1772 qib_userlen); 1773 if (!qib_userbase) 1774 goto bail_piobase; 1775 } 1776 1777 dd->kregbase = qib_kregbase; 1778 dd->kregend = (u64 __iomem *) 1779 ((char __iomem *) qib_kregbase + qib_kreglen); 1780 dd->piobase = qib_piobase; 1781 dd->pio2kbase = (void __iomem *) 1782 (((char __iomem *) dd->piobase) + 1783 qib_pio2koffset - qib_kreglen); 1784 if (dd->piobcnt4k) 1785 dd->pio4kbase = (void __iomem *) 1786 (((char __iomem *) dd->piobase) + 1787 qib_pio4koffset - qib_kreglen); 1788 if (qib_userlen) 1789 /* ureg will now be accessed relative to dd->userbase */ 1790 dd->userbase = qib_userbase; 1791 return 0; 1792 1793 bail_piobase: 1794 iounmap(qib_piobase); 1795 bail_kregbase: 1796 iounmap(qib_kregbase); 1797 bail: 1798 return -ENOMEM; 1799 } 1800
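/*
 * Worked example of the remapping math above (hypothetical offsets,
 * purely for illustration): with piobcnt4k != 0, pio2koffset = 0x100000,
 * pio2klen = 0x80000, pio4koffset = 0x180000 and pio4klen = 0x100000,
 * init_chip_wc_pat() picks qib_kreglen = 0x100000 and qib_piolen =
 * 0x180000 (plus vl15buflen), so everything below 0x100000 stays an
 * uncached register mapping while the whole PIO buffer range becomes a
 * single write-combined mapping; pio2kbase then lands at piobase + 0
 * and pio4kbase at piobase + 0x80000.
 */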