/*
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
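
/*
 * Illustrative usage note (not part of the driver): because cap_mask is
 * registered via module_param_cb() above, the set/get callbacks run
 * whenever the parameter is written or read from userspace, e.g.:
 *
 *   modprobe hfi1 cap_mask=0x4c09a00cb9a
 *   cat /sys/module/hfi1/parameters/cap_mask
 *   echo 0x4c09a00cb9a > /sys/module/hfi1/parameters/cap_mask
 *
 * The mask value shown is hypothetical; hfi1_caps_set() silently drops
 * reserved bits and, once HFI1_CAP_LOCKED() is set, any bits outside
 * the writable mask.
 */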

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return nunits_active;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (void *)(rhf_addr - rcd->rhf_offset + offset);
}

static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{
	return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}

static inline struct hfi1_16b_header
	*hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
			     __le32 *rhf_addr)
{
	return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}

/*
 * Validate and encode a given RcvArray buffer size.
 * The function checks whether the given size falls within the
 * allowed size range for the respective type and, optionally,
 * returns the proper encoding.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!PAGE_ALIGNED(size)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size > (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}
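
/*
 * Worked example (illustrative, assuming 4 KiB pages): for an eager
 * buffer of size = 64 KiB, size / PAGE_SIZE = 16, so the encoding is
 * ilog2(16) + 1 = 5.  Decoding reverses this: 1 << (5 - 1) = 16 pages
 * = 64 KiB.  Sizes that are not page aligned, smaller than
 * MIN_EAGER_BUFFER, or above the per-type maximum are rejected with 0.
 */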

static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct ib_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	u32 mlid_base;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
	struct rvt_dev_info *rdi = &verbs_dev->rdi;

	if ((packet->rhf & RHF_DC_ERR) &&
	    hfi1_dbg_fault_suppress_err(verbs_dev))
		return;

	if (packet->rhf & RHF_ICRC_ERR)
		return;

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		goto drop;
	} else {
		u8 lnh = ib_get_lnh(rhdr);

		mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &rhdr->u.oth;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &rhdr->u.l.oth;
			packet->grh = &rhdr->u.l.grh;
		} else {
			goto drop;
		}
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u32 dlid = ib_get_dlid(rhdr);
		u32 qp_num;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (packet->grh) {
			u32 vtf;
			struct ib_grh *grh = packet->grh;

			if (grh->next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(grh->version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		}

		/* Get the destination QP number. */
		qp_num = ib_bth_get_qpn(packet->ohdr);
		if (dlid < mlid_base) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits.  By now, the packet has
 * already been evaluated as to whether processing of those bits should
 * be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
 */
bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_other_headers *ohdr = pkt->ohdr;
	struct ib_grh *grh = pkt->grh;
	u32 rqpn = 0;
	u16 pkey;
	u32 rlid, slid, dlid = 0;
	u8 hdr_type, sc, svc_type, opcode;
	bool is_mcast = false, ignore_fecn = false, do_cnp = false,
		fecn, becn;

	/* can be called from prescan */
	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(pkt->hdr);
		sc = hfi1_16B_get_sc(pkt->hdr);
		dlid = hfi1_16B_get_dlid(pkt->hdr);
		slid = hfi1_16B_get_slid(pkt->hdr);
		is_mcast = hfi1_is_16B_mcast(dlid);
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_16B;
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		pkey = ib_bth_get_pkey(ohdr);
		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
		dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
			ppd->lid;
		slid = ib_get_slid(pkt->hdr);
		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
			(dlid != be16_to_cpu(IB_LID_PERMISSIVE));
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_9B;
		fecn = ib_bth_get_fecn(ohdr);
		becn = ib_bth_get_becn(ohdr);
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return false;
	}

	ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
		(opcode == IB_OPCODE_RC_ACKNOWLEDGE);
	/*
	 * ACKNOWLEDGE packets do not get a CNP but this will be
	 * guarded by ignore_fecn above.
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}

struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}
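
/*
 * Worked example of the header queue arithmetic above (illustrative
 * numbers): with get_hdrq_cnt() = 2048 entries of get_hdrqentsize() =
 * 32 words each, maxcnt = 2048 * 32 = 65536 words and rsize = 32 words.
 * Each update_ps_mdata() call advances ps_head by 32 and wraps it back
 * to 0 at 65536, so ps_head is always the word offset of the next RHF
 * slot in the circular receive header queue.
 */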

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
	do { \
		if (rcd->ppd->cc_prescan) \
			__prescan_rxq(packet); \
	} while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_ibport *ibp = rcd_to_iport(rcd);
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
				   packet->rcd->rhf_offset;
		struct rvt_qp *qp;
		struct ib_header *hdr;
		struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
		hdr = packet->hdr;
		lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &hdr->u.oth;
			packet->grh = NULL;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &hdr->u.l.oth;
			packet->grh = &hdr->u.l.grh;
		} else {
			goto next; /* just in case */
		}

		if (!hfi1_may_ecn(packet))
			goto next;

		bth1 = be32_to_cpu(packet->ohdr->bth[1]);
		qpn = bth1 & RVT_QPN_MASK;
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

		if (!qp) {
			rcu_read_unlock();
			goto next;
		}

		hfi1_process_ecn_slowpath(qp, packet, true);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
		packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

static void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct rvt_qp *qp, *nqp;
	struct hfi1_ctxtdata *rcd = packet->rcd;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			packet->qp = qp;
			hfi1_send_rc_ack(packet, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
			    RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}
}

static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
	if (thread) {
		if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
			/* allow deferred processing */
			process_rcv_qp_work(packet);
		cond_resched();
		return RCV_PKT_OK;
	} else {
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
		return RCV_PKT_LIMIT;
	}
}

static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
		ret = max_packet_exceeded(packet, thread);
	return ret;
}

static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((get_hdrqentsize(packet->rcd) -
					       (rhf_hdrq_offset(packet->rhf)
						+ 2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet.  We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes.  If so something is really
	 * wrong and we can probably just let things come
	 * crashing down.  There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}
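
/*
 * Note on the masking in check_max_packet() (illustrative): since
 * MAX_PKT_RECV and MAX_PKT_RECV_THREAD are powers of two,
 * (numpkt & (N - 1)) == 0 is a cheap "numpkt % N == 0" test.  With the
 * defaults above, a threaded receive cond_resched()s every 64 packets
 * and also flushes the qp_wait_list every 256, while a non-threaded
 * (IRQ) receive bumps the rcv_limit counter and bails out with
 * RCV_PKT_LIMIT after 64 packets.
 */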

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->grh = NULL;
}

static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt.
	 */
	update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}

/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/*
	 * For dynamically allocated kernel contexts (like vnic) switch
	 * interrupt handler only for that context.  Otherwise, switch
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (ctxt >= dd->first_dyn_alloc_ctxt) {
		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
		if (rcd) {
			rcd->do_interrupt =
				&handle_receive_interrupt_nodma_rtail;
			hfi1_rcd_put(rcd);
		}
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			rcd->do_interrupt =
				&handle_receive_interrupt_nodma_rtail;
		hfi1_rcd_put(rcd);
	}
}

static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/*
	 * For dynamically allocated kernel contexts (like vnic) switch
	 * interrupt handler only for that context.  Otherwise, switch
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (ctxt >= dd->first_dyn_alloc_ctxt) {
		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
		if (rcd) {
			rcd->do_interrupt =
				&handle_receive_interrupt_dma_rtail;
			hfi1_rcd_put(rcd);
		}
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			rcd->do_interrupt =
				&handle_receive_interrupt_dma_rtail;
		hfi1_rcd_put(rcd);
	}
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			rcd->do_interrupt = &handle_receive_interrupt;

		hfi1_rcd_put(rcd);
	}
}

static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
				      struct hfi1_packet *packet,
				      struct hfi1_devdata *dd)
{
	struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
	u8 etype = rhf_rcv_type(packet->rhf);
	u8 sc = SC15_PACKET;

	if (etype == RHF_RCV_TYPE_IB) {
		struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
							   packet->rhf_addr);
		sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	} else if (etype == RHF_RCV_TYPE_BYPASS) {
		struct hfi1_16b_header *hdr = hfi1_get_16B_header(
						packet->rcd,
						packet->rhf_addr);
		sc = hfi1_16B_get_sc(hdr);
	}
	if (sc != SC15_PACKET) {
		int hwstate = driver_lstate(rcd->ppd);

		if (hwstate != IB_PORT_ACTIVE) {
			dd_dev_info(dd,
				    "Unexpected link state %s\n",
				    opa_lstate_name(hwstate));
			return 0;
		}

		queue_work(rcd->ppd->link_wq, lsaw);
		return 1;
	}
	return 0;
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!get_dma_rtail_setting(rcd)) {
		if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf.  Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
				skip_pkt = 1;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		if (unlikely(dd->do_drop &&
			     atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
			     DROP_PACKET_ON)) {
			dd->do_drop = 0;

			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			/* Auto activate link on non-SC15 packet receive */
			if (unlikely(rcd->ppd->host_link_state ==
				     HLS_UP_ARMED) &&
			    set_armed_to_active(rcd, &packet, dd))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!get_dma_rtail_setting(rcd)) {
			if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
				last = RCV_PKT_DONE;
			if (needset) {
				dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
				set_nodma_rtail(dd, rcd->ctxt);
				needset = 0;
			}
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf.  Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				bool lseq;

				lseq = hfi1_seq_incr(rcd,
						     rhf_rcv_seq(packet.rhf));
				if (!last && lseq)
					skip_pkt = 1;
			}

			if (needset) {
				dd_dev_info(dd, "Switching to DMA_RTAIL\n");
				set_dma_rtail(dd, rcd->ctxt);
				needset = 0;
			}
		}

		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return @default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case 0: return OPA_MTU_0;
	case 256: return OPA_MTU_256;
	case 512: return OPA_MTU_512;
	case 1024: return OPA_MTU_1024;
	case 2048: return OPA_MTU_2048;
	case 4096: return OPA_MTU_4096;
	case 8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}

u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0: return 0;
	case OPA_MTU_256: return 256;
	case OPA_MTU_512: return 512;
	case OPA_MTU_1024: return 1024;
	case OPA_MTU_2048: return 2048;
	case OPA_MTU_4096: return 4096;
	case OPA_MTU_8192: return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}
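
/*
 * Illustrative round trip (hypothetical arguments): mtu_to_enum(4096,
 * OPA_MTU_2048) yields OPA_MTU_4096 and enum_to_mtu(OPA_MTU_4096)
 * yields 4096 again, while an invalid size such as mtu_to_enum(3000,
 * OPA_MTU_2048) falls back to the supplied default and an unknown enum
 * decodes to 0xffff.
 */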

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT ||
	    ppd->host_link_state == HLS_UP_ARMED ||
	    ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL.  To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

	return 0;
}

void shutdown_led_override(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	if (atomic_read(&ppd->led_override_timer_active)) {
		del_timer_sync(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 0);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}

	/* Hand control of the LED to the DC for normal operation */
	write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}

static void run_led_override(struct timer_list *t)
{
	struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
	struct hfi1_devdata *dd = ppd->dd;
	unsigned long timeout;
	int phase_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	phase_idx = ppd->led_override_phase & 1;

	setextled(dd, phase_idx);

	timeout = ppd->led_override_vals[phase_idx];

	/* Set up for next phase */
	ppd->led_override_phase = !ppd->led_override_phase;

	mod_timer(&ppd->led_override_timer, jiffies + timeout);
}

/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override().
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff)
{
	if (!(ppd->dd->flags & HFI1_INITTED))
		return;

	/* Convert to jiffies for direct use in timer */
	ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
	ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

	/* Arbitrarily start from LED on phase */
	ppd->led_override_phase = 1;

	/*
	 * If the timer has not already been started, do so.  Use a "quick"
	 * timeout so the handler will be called soon to look at our request.
	 */
	if (!timer_pending(&ppd->led_override_timer)) {
		timer_setup(&ppd->led_override_timer, run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 1);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}
}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int hfi1_reset_device(int unit)
{
	int ret;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	/* If there are any user/vnic contexts, we cannot reset */
	mutex_lock(&hfi1_mutex);
	if (dd->rcd)
		if (hfi1_stats.sps_ctxts) {
			mutex_unlock(&hfi1_mutex);
			ret = -EBUSY;
			goto bail;
		}
	mutex_unlock(&hfi1_mutex);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		shutdown_led_override(ppd);
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			   "Reinitialize unit %u after reset failed with %d\n",
			   unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}

static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
	packet->hdr = (struct hfi1_ib_message_header *)
			hfi1_get_msgheader(packet->rcd,
					   packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}

static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
{
	struct hfi1_pportdata *ppd = packet->rcd->ppd;

	/* slid and dlid cannot be 0 */
	if ((!packet->slid) || (!packet->dlid))
		return -EINVAL;

	/* Compare port lid with incoming packet dlid */
	if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
	    (packet->dlid !=
		opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
		if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
			return -EINVAL;
	}

	/* No multicast packets with SC15 */
	if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
		return -EINVAL;

	/* Packets with permissive DLID always on SC15 */
	if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
					 16B)) &&
	    (packet->sc != 0xF))
		return -EINVAL;

	return 0;
}

static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct ib_header *hdr;
	u8 lnh;

	hfi1_setup_ib_header(packet);
	hdr = packet->hdr;

	lnh = ib_get_lnh(hdr);
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
		packet->grh = NULL;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		packet->grh = &hdr->u.l.grh;
		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf;
	packet->opcode = ib_bth_get_opcode(packet->ohdr);
	packet->slid = ib_get_slid(hdr);
	packet->dlid = ib_get_dlid(hdr);
	if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			be16_to_cpu(IB_MULTICAST_LID_BASE);
	packet->sl = ib_get_sl(hdr);
	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	packet->pad = ib_bth_get_pad(packet->ohdr);
	packet->extra_byte = 0;
	packet->pkey = ib_bth_get_pkey(packet->ohdr);
	packet->migrated = ib_bth_is_migration(packet->ohdr);

	return 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{
	/*
	 * Bypass packets have a different header/payload split
	 * compared to an IB packet.
	 * Current split is set such that 16 bytes of the actual
	 * header is in the header buffer and the remaining is in
	 * the eager buffer.  We chose 16 since the hfi1 driver only
	 * supports 16B bypass packets and we will be able to
	 * receive the entire LRH with such a split.
	 */

	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u8 l4;

	packet->hdr = (struct hfi1_16b_header *)
			hfi1_get_16B_header(packet->rcd,
					    packet->rhf_addr);
	l4 = hfi1_16B_get_l4(packet->hdr);
	if (l4 == OPA_16B_L4_IB_LOCAL) {
		packet->ohdr = packet->ebuf;
		packet->grh = NULL;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES);
		packet->migrated = opa_bth_is_migration(packet->ohdr);
	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
		u32 vtf;
		u8 grh_len = sizeof(struct ib_grh);

		packet->ohdr = packet->ebuf + grh_len;
		packet->grh = packet->ebuf;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
		packet->migrated = opa_bth_is_migration(packet->ohdr);

		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else if (l4 == OPA_16B_L4_FM) {
		packet->mgmt = packet->ebuf;
		packet->ohdr = NULL;
		packet->grh = NULL;
		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
		packet->pad = OPA_16B_L4_FM_PAD;
		packet->hlen = OPA_16B_L4_FM_HLEN;
		packet->migrated = false;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
	packet->slid = hfi1_16B_get_slid(packet->hdr);
	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
				    16B);
	packet->sc = hfi1_16B_get_sc(packet->hdr);
	packet->sl = ibp->sc_to_sl[packet->sc];
	packet->extra_byte = SIZE_OF_LT;
	packet->pkey = hfi1_16B_get_pkey(packet->hdr);

	if (hfi1_bypass_ingress_pkt_check(packet))
		goto drop;

	return 0;
drop:
	hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}
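
/*
 * Worked example of the header/payload split above (illustrative): the
 * 16-byte 16B LRH lands in the header buffer and everything after it
 * lands in the eager buffer, so for an L4_IB_LOCAL packet packet->ohdr
 * points at the start of the eager buffer.  Since hlen counts from the
 * LRH, payload = ebuf + hlen - LRH_16B_BYTES skips exactly the
 * remaining header bytes that follow the LRH in the eager buffer.
 */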

static void show_eflags_errs(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	dd_dev_err(rcd->dd,
		   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
		   rcd->ctxt, packet->rhf,
		   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
		   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
		   packet->rhf & RHF_DC_ERR ? "dc " : "",
		   packet->rhf & RHF_TID_ERR ? "tid " : "",
		   packet->rhf & RHF_LEN_ERR ? "len " : "",
		   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
		   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
		   rte);
}

void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	rcv_hdrerr(rcd, rcd->ppd, packet);
	if (rhf_err_flags(packet->rhf))
		show_eflags_errs(packet);
}

/*
 * The following functions are called by the interrupt handler.  They are type
 * specific handlers for each packet type.
 */
static int process_receive_ib(struct hfi1_packet *packet)
{
	if (hfi1_setup_9B_packet(packet))
		return RHF_RCV_CONTINUE;

	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return RHF_RCV_CONTINUE;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return RHF_RCV_CONTINUE;
	}

	hfi1_ib_rcv(packet);
	return RHF_RCV_CONTINUE;
}

static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
{
	/* Packet received in VNIC context via RSM */
	if (packet->rcd->is_vnic)
		return true;

	if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
	    (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
		return true;

	return false;
}

static int process_receive_bypass(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;

	if (hfi1_is_vnic_packet(packet)) {
		hfi1_vnic_bypass_rcv(packet);
		return RHF_RCV_CONTINUE;
	}

	if (hfi1_setup_bypass_packet(packet))
		return RHF_RCV_CONTINUE;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return RHF_RCV_CONTINUE;
	}

	if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
		hfi1_16B_rcv(packet);
	} else {
		dd_dev_err(dd,
			   "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
		incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			u64 *flits = packet->ebuf;

			if (flits && !(packet->rhf & RHF_LEN_ERR)) {
				dd->err_info_rcvport.packet_flit1 = flits[0];
				dd->err_info_rcvport.packet_flit2 =
					packet->tlen > sizeof(flits[0]) ?
					flits[1] : 0;
			}
			dd->err_info_rcvport.status_and_code |=
				(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
		}
	}
	return RHF_RCV_CONTINUE;
}

static int process_receive_error(struct hfi1_packet *packet)
{
	/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
	if (unlikely(
		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
		 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
		  packet->rhf & RHF_DC_ERR)))
		return RHF_RCV_CONTINUE;

	hfi1_setup_ib_header(packet);
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");

	return RHF_RCV_CONTINUE;
}

static int kdeth_process_expected(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return RHF_RCV_CONTINUE;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return RHF_RCV_CONTINUE;
	}

	hfi1_kdeth_expected_rcv(packet);
	return RHF_RCV_CONTINUE;
}

static int kdeth_process_eager(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return RHF_RCV_CONTINUE;

	trace_hfi1_rcvhdr(packet);
	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		show_eflags_errs(packet);
		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return RHF_RCV_CONTINUE;
	}

	hfi1_kdeth_eager_rcv(packet);
	return RHF_RCV_CONTINUE;
}

static int process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
	return RHF_RCV_CONTINUE;
}

void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_packet packet;
	struct ps_mdata mdata;

	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n",
		   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
		   get_dma_rtail_setting(rcd) ?
		   "dma_rtail" : "nodma_rtail",
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
		   RCV_HDR_HEAD_HEAD_MASK,
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
1737 "dma_rtail" : "nodma_rtail", 1738 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & 1739 RCV_HDR_HEAD_HEAD_MASK, 1740 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL)); 1741 1742 init_packet(rcd, &packet); 1743 init_ps_mdata(&mdata, &packet); 1744 1745 while (1) { 1746 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + 1747 rcd->rhf_offset; 1748 struct ib_header *hdr; 1749 u64 rhf = rhf_to_cpu(rhf_addr); 1750 u32 etype = rhf_rcv_type(rhf), qpn; 1751 u8 opcode; 1752 u32 psn; 1753 u8 lnh; 1754 1755 if (ps_done(&mdata, rhf, rcd)) 1756 break; 1757 1758 if (ps_skip(&mdata, rhf, rcd)) 1759 goto next; 1760 1761 if (etype > RHF_RCV_TYPE_IB) 1762 goto next; 1763 1764 packet.hdr = hfi1_get_msgheader(rcd, rhf_addr); 1765 hdr = packet.hdr; 1766 1767 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 1768 1769 if (lnh == HFI1_LRH_BTH) 1770 packet.ohdr = &hdr->u.oth; 1771 else if (lnh == HFI1_LRH_GRH) 1772 packet.ohdr = &hdr->u.l.oth; 1773 else 1774 goto next; /* just in case */ 1775 1776 opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24); 1777 qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK; 1778 psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2])); 1779 1780 seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n", 1781 mdata.ps_head, opcode, qpn, psn); 1782 next: 1783 update_ps_mdata(&mdata, rcd); 1784 } 1785 } 1786 1787 const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = { 1788 [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected, 1789 [RHF_RCV_TYPE_EAGER] = kdeth_process_eager, 1790 [RHF_RCV_TYPE_IB] = process_receive_ib, 1791 [RHF_RCV_TYPE_ERROR] = process_receive_error, 1792 [RHF_RCV_TYPE_BYPASS] = process_receive_bypass, 1793 [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, 1794 [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, 1795 [RHF_RCV_TYPE_INVALID7] = process_receive_invalid, 1796 }; 1797