/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "qib.h"

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_qib_version[] = QIB_IDSTR "\n";

DEFINE_SPINLOCK(qib_devs_lock);
LIST_HEAD(qib_dev_list);
DEFINE_MUTEX(qib_mutex);	/* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");

unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
		 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic IB driver");

/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers.  This is well beyond anything currently
 * defined in the InfiniBand spec.
 */
#define QIB_PIO_MAXIBHDR 128

/*
 * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

const char *qib_get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

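/*
 * Note that qib_get_unit_name() formats into a single static buffer, so the
 * returned string is only valid until the next call; callers are expected to
 * consume it immediately, e.g. (illustrative only, not a real driver message):
 *
 *	pr_err("%s: link went down\n", qib_get_unit_name(dd->unit));
 */
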
/*
 * Return count of units with at least one port ACTIVE.
 */
int qib_count_active_units(void)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int qib_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct qib_devdata *dd;
	unsigned long flags;
	int pidx;
	struct qib_pportdata *ppd;

	spin_lock_irqsave(&qib_devs_lock, flags);

	list_for_each_entry(dd, &qib_dev_list, list) {
		nunits++;
		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE)))
				nup++;
		}
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}

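/*
 * Typical use of the counters above (illustrative only): a caller that just
 * needs to know whether any usable unit exists might do
 *
 *	int present, nup;
 *
 *	if (!qib_count_units(&present, &nup) || !present)
 *		return -ENODEV;
 */
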
/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib per-port data
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route.  Currently used only by
 * qib_set_linkstate.  Returns 0 if the state is reached, otherwise
 * -ETIMEDOUT.  state can have multiple state bits set, to wait for
 * any of several transitions.
 */
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	if (ppd->state_wanted) {
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ret = -EBUSY;
		goto bail;
	}
	ppd->state_wanted = state;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wait_event_interruptible_timeout(ppd->state_wait,
					 (ppd->lflags & state),
					 msecs_to_jiffies(msecs));
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->state_wanted = 0;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!(ppd->lflags & state))
		ret = -ETIMEDOUT;
	else
		ret = 0;
bail:
	return ret;
}

int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
	u32 lstate;
	int ret;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	switch (newstate) {
	case QIB_IB_LINKDOWN_ONLY:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_SLEEP:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_DISABLE:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKARM:
		if (ppd->lflags & QIBL_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		/*
		 * Since the port can be ACTIVE when we ask for ARMED,
		 * clear QIBL_LINKV so we can wait for a transition.
		 * If the link isn't ARMED, then something else happened
		 * and there is no point waiting for ARMED.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKV;
		break;

	case QIB_IB_LINKACTIVE:
		if (ppd->lflags & QIBL_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & QIBL_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKACTIVE;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
	return ret;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
	const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
	const u32 idx = etail % rcd->rcvegrbufs_perchunk;

	return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
}

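/*
 * Worked example for qib_get_egrbuf() (illustrative only): with
 * rcvegrbufs_perchunk == 32 and etail == 70, the buffer lives in chunk
 * 70 / 32 == 2, at byte offset (70 % 32) * rcvegrbufsize == 6 *
 * rcvegrbufsize within that chunk.  The actual per-chunk count is chip
 * and context dependent.
 */
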
/*
 * Returns 1 if error was a CRC, else 0.
 * Needed for some chips' synthesized error counters.
 */
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
			  u32 ctxt, u32 eflags, u32 l, u32 etail,
			  __le32 *rhf_addr, struct qib_message_header *rhdr)
{
	u32 ret = 0;

	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
		ret = 1;
	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
		struct qib_other_headers *ohdr = NULL;
		struct qib_ibport *ibp = &ppd->ibport_data;
		struct qib_qp *qp = NULL;
		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
		u32 qp_num;
		u32 opcode;
		u32 psn;
		int diff;
		unsigned long flags;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		if (lid < QIB_MULTICAST_LID_BASE) {
			lid &= ~((1 << ppd->lmc) - 1);
			if (unlikely(lid != ppd->lid))
				goto drop;
		}

		/* Check for GRH */
		if (lnh == QIB_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == QIB_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		} else
			goto drop;

		/* Get opcode and PSN from packet */
		opcode = be32_to_cpu(ohdr->bth[0]);
		opcode >>= 24;
		psn = be32_to_cpu(ohdr->bth[2]);

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
		if (qp_num != QIB_MULTICAST_QPN) {
			int ruc_res;

			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock(&qp->r_lock);

			/* Check for valid receive state. */
			if (!(ib_qib_state_ops[qp->state] &
			      QIB_PROCESS_RECV_OK)) {
				ibp->n_pkt_drops++;
				goto unlock;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				spin_lock_irqsave(&qp->s_lock, flags);
				ruc_res =
					qib_ruc_check_hdr(
						ibp, hdr,
						lnh == QIB_LRH_GRH,
						qp,
						be32_to_cpu(ohdr->bth[0]));
				if (ruc_res) {
					spin_unlock_irqrestore(&qp->s_lock,
							       flags);
					goto unlock;
				}
				spin_unlock_irqrestore(&qp->s_lock, flags);

				/* Only deal with RDMA Writes for now */
				if (opcode <
				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
					diff = qib_cmp24(psn, qp->r_psn);
					if (!qp->r_nak_state && diff >= 0) {
						ibp->n_rc_seqnak++;
						qp->r_nak_state =
							IB_NAK_PSN_ERROR;
						/* Use the expected PSN. */
						qp->r_ack_psn = qp->r_psn;
						/*
						 * Wait to send the sequence
						 * NAK until all packets
						 * in the receive queue have
						 * been processed.
						 * Otherwise, we end up
						 * propagating congestion.
						 */
						if (list_empty(&qp->rspwait)) {
							qp->r_flags |=
								QIB_R_RSP_NAK;
							atomic_inc(
								&qp->refcount);
							list_add_tail(
							 &qp->rspwait,
							 &rcd->qp_wait_list);
						}
					} /* Out of sequence NAK */
				} /* QP Request NAKs */
				break;
			case IB_QPT_SMI:
			case IB_QPT_GSI:
			case IB_QPT_UD:
			case IB_QPT_UC:
			default:
				/* For now don't handle any other QP types */
				break;
			}

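			/*
			 * Whatever happened above, the reference taken by
			 * qib_lookup_qpn() is dropped once r_lock is
			 * released below.
			 */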
unlock:
			spin_unlock(&qp->r_lock);
			/*
			 * Notify qib_destroy_qp() if it is waiting
			 * for us to finish.
			 */
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

drop:
	return ret;
}

/*
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *        (used with chips that need to track crcs for lli)
 *
 * Called from interrupt handler for errors or receive interrupt.
 * Returns number of CRC error packets, needed by some chips for
 * local link integrity tracking.  crcs are adjusted down by following
 * good packets, if any, and count of good packets is also tracked.
 */
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct qib_message_header *hdr;
	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
	int last;
	u64 lval;
	struct qib_qp *qp, *nqp;

	l = rcd->head;
	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
	if (dd->flags & QIB_NODMA_RTAIL) {
		u32 seq = qib_hdrget_seq(rhf_addr);

		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = qib_get_rcvhdrtail(rcd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->f_get_msgheader(dd, rhf_addr);
		eflags = qib_hdrget_err_flags(rhf_addr);
		etype = qib_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = qib_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->flags & QIB_NODMA_RTAIL) ?
		    qib_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = qib_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype >= RCVHQ_RCV_TYPE_NON_KD)
				ebuf = qib_get_egrbuf(rcd, etail);
		}
		if (!eflags) {
			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

			if (lrh_len != tlen) {
				qib_stats.sps_lenerrs++;
				goto move_along;
			}
		}
		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
		    ebuf == NULL &&
		    tlen > (dd->rcvhdrentsize - 2 + 1 -
			    qib_hdrget_offset(rhf_addr)) << 2) {
			goto move_along;
		}

		/*
		 * Both tiderr and qibhdrerr are set for all plain IB
		 * packets; only qibhdrerr should be set.
		 */
		if (unlikely(eflags))
			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
					       etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			qib_ib_rcv(rcd, hdr, ebuf, tlen);
			if (crcs)
				crcs--;
			else if (llic && *llic)
				--*llic;
		}
move_along:
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (i == QIB_MAX_PKT_RECV)
			last = 1;

		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
		if (dd->flags & QIB_NODMA_RTAIL) {
			u32 seq = qib_hdrget_seq(rhf_addr);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * Update head regs etc., every 16 packets, if not last pkt,
		 * to help prevent rcvhdrq overflows, when many packets
		 * are processed and queue is nearly full.
		 * Don't request an interrupt for intermediate updates.
		 */
		lval = l;
		if (!last && !(i & 0xf)) {
			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
			updegr = 0;
		}
	}

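	/*
	 * l is now the index of the first unprocessed rcvhdrq entry and i is
	 * the number of entries consumed in this pass; record both.
	 */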
	rcd->head = l;
	rcd->pkt_count += i;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & QIB_R_RSP_NAK) {
			qp->r_flags &= ~QIB_R_RSP_NAK;
			qib_send_rc_ack(qp);
		}
		if (qp->r_flags & QIB_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~QIB_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_qib_state_ops[qp->state] &
			    QIB_PROCESS_OR_FLUSH_SEND)
				qib_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}

bail:
	/* Report number of packets consumed */
	if (npkts)
		*npkts = i;

	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
	return crcs;
}

/**
 * qib_set_mtu - set the MTU
 * @ppd: the per-port data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
	u32 piosize;
	int ret, chk;

	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ret = -EINVAL;
		goto bail;
	}
	chk = ib_mtu_enum_to_int(qib_ibmtu);
	if (chk > 0 && arg > chk) {
		ret = -EINVAL;
		goto bail;
	}

	piosize = ppd->ibmaxlen;
	ppd->ibmtu = arg;

	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != ppd->init_ibmaxlen) {
			if (arg > piosize && arg <= ppd->init_ibmaxlen)
				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
			ppd->ibmaxlen = piosize;
		}
	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
		ppd->ibmaxlen = piosize;
	}

	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

	ret = 0;

bail:
	return ret;
}

int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
	struct qib_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;

	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
			 lid | (~((1U << lmc) - 1)) << 16);

	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
		    dd->unit, ppd->port, lid);

	return 0;
}

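/*
 * For reference (illustrative only): in the QIB_IB_CFG_LIDLMC word above,
 * the low 16 bits carry the LID and the upper bits carry the LMC-derived
 * mask, e.g. lid 0x12 with lmc 2 encodes as 0xFFFC0012.  The chip-specific
 * f_set_ib_cfg handler is responsible for interpreting it.
 */
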
/*
 * The following deals with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void qib_run_led_override(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & QIB_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	dd->f_setextled(ppd, 1);
	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
	struct qib_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&ppd->led_override_timer);
		ppd->led_override_timer.function = qib_run_led_override;
		ppd->led_override_timer.data = (unsigned long) ppd;
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}

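/*
 * Example of the override encoding above (illustrative only): a call like
 * qib_set_led_override(ppd, (2 << LED_OVER_FREQ_SHIFT) | 0x15) yields
 * freq == 2, phase values 0x5 and 0x1, and a phase period of
 * (HZ << 4) / 2 == 8 * HZ jiffies; val == LED_OVER_BOTH_OFF (8) forces the
 * override on with both LEDs dark.
 */
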
/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int qib_reset_device(int unit)
{
	int ret, i;
	struct qib_devdata *dd = qib_lookup(unit);
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
		qib_devinfo(dd->pcidev, "Invalid unit number %u or "
			    "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
		dd->f_setextled(ppd, 0);
		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);
	}

	ret = dd->f_reset(dd);
	if (ret == 1)
		ret = qib_init(dd, 1);
	else
		ret = -EAGAIN;
	if (ret)
		qib_dev_err(dd, "Reinitialize unit %u after "
			    "reset failed with %d\n", unit, ret);
	else
		qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
			    "resetting\n", unit);

bail:
	return ret;
}