/*
 * Copyright (c) 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "qib.h"

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";

DEFINE_SPINLOCK(qib_devs_lock);
LIST_HEAD(qib_dev_list);
DEFINE_MUTEX(qib_mutex);	/* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");

unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
		 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel <ibsupport@intel.com>");
MODULE_DESCRIPTION("Intel IB driver");

/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers.  This is well beyond anything currently
 * defined in the InfiniBand spec.
 */
#define QIB_PIO_MAXIBHDR 128

/*
 * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

const char *qib_get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof(iname), "infinipath%u", unit);
	return iname;
}

const char *qib_get_card_name(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(ibdev,
					      struct qib_devdata, verbs_dev);
	return qib_get_unit_name(dd->unit);
}

struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(ibdev,
					      struct qib_devdata, verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int qib_count_active_units(void)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally returning in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int qib_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct qib_devdata *dd;
	unsigned long flags;
	int pidx;
	struct qib_pportdata *ppd;

	spin_lock_irqsave(&qib_devs_lock, flags);

	list_for_each_entry(dd, &qib_dev_list, list) {
		nunits++;
		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE)))
				nup++;
		}
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}

/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib per-port data
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route.  Currently used only by
 * qib_set_linkstate.  Returns 0 if the state is reached, otherwise
 * -ETIMEDOUT.  The state can have multiple bits set, to match any of
 * several transitions.
 */
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	if (ppd->state_wanted) {
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ret = -EBUSY;
		goto bail;
	}
	ppd->state_wanted = state;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wait_event_interruptible_timeout(ppd->state_wait,
					 (ppd->lflags & state),
					 msecs_to_jiffies(msecs));
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->state_wanted = 0;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!(ppd->lflags & state))
		ret = -ETIMEDOUT;
	else
		ret = 0;
bail:
	return ret;
}

int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
	u32 lstate;
	int ret;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	switch (newstate) {
	case QIB_IB_LINKDOWN_ONLY:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_SLEEP:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_DISABLE:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKARM:
		if (ppd->lflags & QIBL_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		/*
		 * Since the port can be ACTIVE when we ask for ARMED,
		 * clear QIBL_LINKV so we can wait for a transition.
		 * If the link isn't ARMED, then something else happened
		 * and there is no point waiting for ARMED.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKV;
		break;

	case QIB_IB_LINKACTIVE:
		if (ppd->lflags & QIBL_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & QIBL_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKACTIVE;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
	return ret;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
	const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
	const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);

	return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}

/*
 * Returns 1 if error was a CRC, else 0.
 * Needed for some chips' synthesized error counters.
 */
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
			  u32 ctxt, u32 eflags, u32 l, u32 etail,
			  __le32 *rhf_addr, struct qib_message_header *rhdr)
{
	u32 ret = 0;

	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
		ret = 1;
	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct ib_header *hdr = (struct ib_header *)rhdr;
		struct ib_other_headers *ohdr = NULL;
		struct qib_ibport *ibp = &ppd->ibport_data;
		struct qib_devdata *dd = ppd->dd;
		struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
		struct rvt_qp *qp = NULL;
		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
		u32 qp_num;
		u32 opcode;
		u32 psn;
		int diff;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
			lid &= ~((1 << ppd->lmc) - 1);
			if (unlikely(lid != ppd->lid))
				goto drop;
		}

		/* Check for GRH */
		if (lnh == QIB_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == QIB_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		} else
			goto drop;

		/* Get opcode and PSN from packet */
		opcode = be32_to_cpu(ohdr->bth[0]);
		opcode >>= 24;
		psn = be32_to_cpu(ohdr->bth[2]);

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		if (qp_num != QIB_MULTICAST_QPN) {
			int ruc_res;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock(&qp->r_lock);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
				goto unlock;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				ruc_res =
					qib_ruc_check_hdr(
						ibp, hdr,
						lnh == QIB_LRH_GRH,
						qp,
						be32_to_cpu(ohdr->bth[0]));
				if (ruc_res)
					goto unlock;

				/* Only deal with RDMA Writes for now */
				if (opcode <
				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
					diff = qib_cmp24(psn, qp->r_psn);
					if (!qp->r_nak_state && diff >= 0) {
						ibp->rvp.n_rc_seqnak++;
						qp->r_nak_state =
							IB_NAK_PSN_ERROR;
						/* Use the expected PSN. */
						qp->r_ack_psn = qp->r_psn;
						/*
						 * Wait to send the sequence
						 * NAK until all packets
						 * in the receive queue have
						 * been processed.
						 * Otherwise, we end up
						 * propagating congestion.
						 */
						if (list_empty(&qp->rspwait)) {
							qp->r_flags |=
								RVT_R_RSP_NAK;
							rvt_get_qp(qp);
							list_add_tail(
							 &qp->rspwait,
							 &rcd->qp_wait_list);
						}
					} /* Out of sequence NAK */
				} /* QP Request NAKs */
				break;
			case IB_QPT_SMI:
			case IB_QPT_GSI:
			case IB_QPT_UD:
			case IB_QPT_UC:
			default:
				/* For now don't handle any other QP types */
				break;
			}

unlock:
			spin_unlock(&qp->r_lock);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

drop:
	return ret;
}

/*
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *        (used with chips that need to track CRCs for lli)
 *
 * Called from interrupt handler for errors or receive interrupt.
 * Returns number of CRC error packets, needed by some chips for
 * local link integrity tracking.  CRCs are adjusted down by following
 * good packets, if any, and the count of good packets is also tracked.
 */
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct qib_message_header *hdr;
	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
	int last;
	u64 lval;
	struct rvt_qp *qp, *nqp;

	l = rcd->head;
	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
	if (dd->flags & QIB_NODMA_RTAIL) {
		u32 seq = qib_hdrget_seq(rhf_addr);

		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = qib_get_rcvhdrtail(rcd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->f_get_msgheader(dd, rhf_addr);
		eflags = qib_hdrget_err_flags(rhf_addr);
		etype = qib_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = qib_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->flags & QIB_NODMA_RTAIL) ?
		    qib_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = qib_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype >= RCVHQ_RCV_TYPE_NON_KD) {
				ebuf = qib_get_egrbuf(rcd, etail);
				prefetch_range(ebuf, tlen - sizeof(*hdr));
			}
		}
		if (!eflags) {
			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

			if (lrh_len != tlen) {
				qib_stats.sps_lenerrs++;
				goto move_along;
			}
		}
		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
		    ebuf == NULL &&
		    tlen > (dd->rcvhdrentsize - 2 + 1 -
			    qib_hdrget_offset(rhf_addr)) << 2) {
			goto move_along;
		}

		/*
		 * Both tiderr and qibhdrerr are set for all plain IB
		 * packets; only qibhdrerr should be set.
		 */
		if (unlikely(eflags))
			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
					       etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			qib_ib_rcv(rcd, hdr, ebuf, tlen);
			if (crcs)
				crcs--;
			else if (llic && *llic)
				--*llic;
		}
move_along:
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (i == QIB_MAX_PKT_RECV)
			last = 1;

		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
		if (dd->flags & QIB_NODMA_RTAIL) {
			u32 seq = qib_hdrget_seq(rhf_addr);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * Update head regs etc., every 16 packets, if not last pkt,
		 * to help prevent rcvhdrq overflows, when many packets
		 * are processed and queue is nearly full.
		 * Don't request an interrupt for intermediate updates.
		 */
		lval = l;
		if (!last && !(i & 0xf)) {
			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
			updegr = 0;
		}
	}

	rcd->head = l;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			qib_send_rc_ack(qp);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				qib_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}

bail:
	/* Report number of packets consumed */
	if (npkts)
		*npkts = i;

	/*
	 * Always write head at end, and set up rcv interrupt, even
	 * if no packets were processed.
	 */
	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
	return crcs;
}

/**
 * qib_set_mtu - set the MTU
 * @ppd: the per-port data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
	u32 piosize;
	int ret, chk;

	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ret = -EINVAL;
		goto bail;
	}
	chk = ib_mtu_enum_to_int(qib_ibmtu);
	if (chk > 0 && arg > chk) {
		ret = -EINVAL;
		goto bail;
	}

	piosize = ppd->ibmaxlen;
	ppd->ibmtu = arg;

	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != ppd->init_ibmaxlen) {
			if (arg > piosize && arg <= ppd->init_ibmaxlen)
				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
			ppd->ibmaxlen = piosize;
		}
	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
		ppd->ibmaxlen = piosize;
	}

	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

	ret = 0;

bail:
	return ret;
}

int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
	struct qib_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;

	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
			 lid | (~((1U << lmc) - 1)) << 16);

	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
		    dd->unit, ppd->port, lid);

	return 0;
}

/*
 * The following deal with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void qib_run_led_override(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & QIB_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	dd->f_setextled(ppd, 1);
	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
	struct qib_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* First check if we are blinking. If not, use 1 Hz polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink: set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&ppd->led_override_timer);
		ppd->led_override_timer.function = qib_run_led_override;
		ppd->led_override_timer.data = (unsigned long) ppd;
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}

/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int qib_reset_device(int unit)
{
	int ret, i;
	struct qib_devdata *dd = qib_lookup(unit);
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
		qib_devinfo(dd->pcidev,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
		dd->f_setextled(ppd, 0);
		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);
	}

	ret = dd->f_reset(dd);
	if (ret == 1)
		ret = qib_init(dd, 1);
	else
		ret = -EAGAIN;
	if (ret)
		qib_dev_err(dd,
			    "Reinitialize unit %u after reset failed with %d\n",
			    unit, ret);
	else
		qib_devinfo(dd->pcidev,
			    "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}