// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015-2020 Intel Corporation.
 * Copyright(c) 2021 Cornelis Networks.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
#include <linux/etherdevice.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#include "ipoib.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cornelis Omni-Path Express driver");

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
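 *
 * For example, with the defaults below (MAX_PKT_RECV == 64), a threaded
 * receive handler flushes the qp_wait_list every 64 * 4 = 256 packets.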
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}

struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return nunits_active;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
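 *
 * The RHF supplies an eager index and a buffer offset: rcvtids[idx].addr is
 * the base of the chunk holding the buffer, and the offset is scaled by
 * RCV_BUF_BLOCK_SIZE within that chunk. *update is set when the index
 * crosses the egrbufs threshold at offset 0, indicating that the eager head
 * should be advanced on the next head update.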
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (void *)(rhf_addr - rcd->rhf_offset + offset);
}

static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{
	return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}

static inline struct hfi1_16b_header
	*hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
			     __le32 *rhf_addr)
{
	return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!PAGE_ALIGNED(size)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}

static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct ib_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	u32 mlid_base;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
	struct rvt_dev_info *rdi = &verbs_dev->rdi;

	if ((packet->rhf & RHF_DC_ERR) &&
	    hfi1_dbg_fault_suppress_err(verbs_dev))
		return;

	if (packet->rhf & RHF_ICRC_ERR)
		return;

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		goto drop;
	} else {
		u8 lnh = ib_get_lnh(rhdr);

		mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &rhdr->u.oth;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &rhdr->u.l.oth;
			packet->grh = &rhdr->u.l.grh;
		} else {
			goto drop;
		}
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u32 dlid = ib_get_dlid(rhdr);
		u32 qp_num;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (packet->grh) {
			u32 vtf;
			struct ib_grh *grh = packet->grh;

			if (grh->next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(grh->version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		}

		/* Get the destination QP number. */
		qp_num = ib_bth_get_qpn(packet->ohdr);
		if (dlid < mlid_base) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, the packet
 * has already been evaluated as to whether processing of those bits
 * should be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
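 *
 * Return: true if the packet carried a FECN that still needs to be acted on
 * by the caller (i.e. it was not ignored), false otherwise.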
406 */ 407 bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, 408 bool prescan) 409 { 410 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 411 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 412 struct ib_other_headers *ohdr = pkt->ohdr; 413 struct ib_grh *grh = pkt->grh; 414 u32 rqpn = 0; 415 u16 pkey; 416 u32 rlid, slid, dlid = 0; 417 u8 hdr_type, sc, svc_type, opcode; 418 bool is_mcast = false, ignore_fecn = false, do_cnp = false, 419 fecn, becn; 420 421 /* can be called from prescan */ 422 if (pkt->etype == RHF_RCV_TYPE_BYPASS) { 423 pkey = hfi1_16B_get_pkey(pkt->hdr); 424 sc = hfi1_16B_get_sc(pkt->hdr); 425 dlid = hfi1_16B_get_dlid(pkt->hdr); 426 slid = hfi1_16B_get_slid(pkt->hdr); 427 is_mcast = hfi1_is_16B_mcast(dlid); 428 opcode = ib_bth_get_opcode(ohdr); 429 hdr_type = HFI1_PKT_TYPE_16B; 430 fecn = hfi1_16B_get_fecn(pkt->hdr); 431 becn = hfi1_16B_get_becn(pkt->hdr); 432 } else { 433 pkey = ib_bth_get_pkey(ohdr); 434 sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf); 435 dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) : 436 ppd->lid; 437 slid = ib_get_slid(pkt->hdr); 438 is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && 439 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); 440 opcode = ib_bth_get_opcode(ohdr); 441 hdr_type = HFI1_PKT_TYPE_9B; 442 fecn = ib_bth_get_fecn(ohdr); 443 becn = ib_bth_get_becn(ohdr); 444 } 445 446 switch (qp->ibqp.qp_type) { 447 case IB_QPT_UD: 448 rlid = slid; 449 rqpn = ib_get_sqpn(pkt->ohdr); 450 svc_type = IB_CC_SVCTYPE_UD; 451 break; 452 case IB_QPT_SMI: 453 case IB_QPT_GSI: 454 rlid = slid; 455 rqpn = ib_get_sqpn(pkt->ohdr); 456 svc_type = IB_CC_SVCTYPE_UD; 457 break; 458 case IB_QPT_UC: 459 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); 460 rqpn = qp->remote_qpn; 461 svc_type = IB_CC_SVCTYPE_UC; 462 break; 463 case IB_QPT_RC: 464 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); 465 rqpn = qp->remote_qpn; 466 svc_type = IB_CC_SVCTYPE_RC; 467 break; 468 default: 469 return false; 470 } 471 472 ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) || 473 (opcode == IB_OPCODE_RC_ACKNOWLEDGE); 474 /* 475 * ACKNOWLEDGE packets do not get a CNP but this will be 476 * guarded by ignore_fecn above. 
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}

struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
570 */ 571 #define prescan_rxq(rcd, packet) \ 572 do { \ 573 if (rcd->ppd->cc_prescan) \ 574 __prescan_rxq(packet); \ 575 } while (0) 576 static void __prescan_rxq(struct hfi1_packet *packet) 577 { 578 struct hfi1_ctxtdata *rcd = packet->rcd; 579 struct ps_mdata mdata; 580 581 init_ps_mdata(&mdata, packet); 582 583 while (1) { 584 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 585 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + 586 packet->rcd->rhf_offset; 587 struct rvt_qp *qp; 588 struct ib_header *hdr; 589 struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi; 590 u64 rhf = rhf_to_cpu(rhf_addr); 591 u32 etype = rhf_rcv_type(rhf), qpn, bth1; 592 u8 lnh; 593 594 if (ps_done(&mdata, rhf, rcd)) 595 break; 596 597 if (ps_skip(&mdata, rhf, rcd)) 598 goto next; 599 600 if (etype != RHF_RCV_TYPE_IB) 601 goto next; 602 603 packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr); 604 hdr = packet->hdr; 605 lnh = ib_get_lnh(hdr); 606 607 if (lnh == HFI1_LRH_BTH) { 608 packet->ohdr = &hdr->u.oth; 609 packet->grh = NULL; 610 } else if (lnh == HFI1_LRH_GRH) { 611 packet->ohdr = &hdr->u.l.oth; 612 packet->grh = &hdr->u.l.grh; 613 } else { 614 goto next; /* just in case */ 615 } 616 617 if (!hfi1_may_ecn(packet)) 618 goto next; 619 620 bth1 = be32_to_cpu(packet->ohdr->bth[1]); 621 qpn = bth1 & RVT_QPN_MASK; 622 rcu_read_lock(); 623 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn); 624 625 if (!qp) { 626 rcu_read_unlock(); 627 goto next; 628 } 629 630 hfi1_process_ecn_slowpath(qp, packet, true); 631 rcu_read_unlock(); 632 633 /* turn off BECN, FECN */ 634 bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK); 635 packet->ohdr->bth[1] = cpu_to_be32(bth1); 636 next: 637 update_ps_mdata(&mdata, rcd); 638 } 639 } 640 641 static void process_rcv_qp_work(struct hfi1_packet *packet) 642 { 643 struct rvt_qp *qp, *nqp; 644 struct hfi1_ctxtdata *rcd = packet->rcd; 645 646 /* 647 * Iterate over all QPs waiting to respond. 648 * The list won't change since the IRQ is only run on one CPU. 
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			packet->qp = qp;
			hfi1_send_rc_ack(packet, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
			    RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}
}

static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
	if (thread) {
		if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
			/* allow deferred processing */
			process_rcv_qp_work(packet);
		cond_resched();
		return RCV_PKT_OK;
	} else {
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
		return RCV_PKT_LIMIT;
	}
}

static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
		ret = max_packet_exceeded(packet, thread);
	return ret;
}

static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->rcd->dd->ctx0_seq_drop++;
	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static void process_rcv_packet_napi(struct hfi1_packet *packet)
{
	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->etail = rhf_egr_index(packet->rhf);
	packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
				  &packet->updegr);
	/*
	 * Prefetch the contents of the eager buffer. It is
	 * OK to send a negative length to prefetch_range().
	 * The +2 is the size of the RHF.
	 */
	prefetch_range(packet->ebuf,
		       packet->tlen - ((packet->rcd->rcvhdrqentsize -
					(rhf_hdrq_offset(packet->rhf)
					 + 2)) * 4));

	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
}

static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer. It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
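		 * The subtracted term is the part of the packet that was
		 * already delivered in the header queue entry (entry size
		 * minus header offset plus RHF, in dwords, hence the "* 4"),
		 * so only the payload that sits in the eager buffer is
		 * prefetched.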
765 */ 766 prefetch_range(packet->ebuf, 767 packet->tlen - ((get_hdrqentsize(packet->rcd) - 768 (rhf_hdrq_offset(packet->rhf) 769 + 2)) * 4)); 770 } 771 772 /* 773 * Call a type specific handler for the packet. We 774 * should be able to trust that etype won't be beyond 775 * the range of valid indexes. If so something is really 776 * wrong and we can probably just let things come 777 * crashing down. There is no need to eat another 778 * comparison in this performance critical code. 779 */ 780 packet->rcd->rhf_rcv_function_map[packet->etype](packet); 781 packet->numpkt++; 782 783 /* Set up for the next packet */ 784 packet->rhqoff += packet->rsize; 785 if (packet->rhqoff >= packet->maxcnt) 786 packet->rhqoff = 0; 787 788 ret = check_max_packet(packet, thread); 789 790 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + 791 packet->rcd->rhf_offset; 792 packet->rhf = rhf_to_cpu(packet->rhf_addr); 793 794 return ret; 795 } 796 797 static inline void process_rcv_update(int last, struct hfi1_packet *packet) 798 { 799 /* 800 * Update head regs etc., every 16 packets, if not last pkt, 801 * to help prevent rcvhdrq overflows, when many packets 802 * are processed and queue is nearly full. 803 * Don't request an interrupt for intermediate updates. 804 */ 805 if (!last && !(packet->numpkt & 0xf)) { 806 update_usrhead(packet->rcd, packet->rhqoff, packet->updegr, 807 packet->etail, 0, 0); 808 packet->updegr = 0; 809 } 810 packet->grh = NULL; 811 } 812 813 static inline void finish_packet(struct hfi1_packet *packet) 814 { 815 /* 816 * Nothing we need to free for the packet. 817 * 818 * The only thing we need to do is a final update and call for an 819 * interrupt 820 */ 821 update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr, 822 packet->etail, rcv_intr_dynamic, packet->numpkt); 823 } 824 825 /* 826 * handle_receive_interrupt_napi_fp - receive a packet 827 * @rcd: the context 828 * @budget: polling budget 829 * 830 * Called from interrupt handler for receive interrupt. 831 * This is the fast path interrupt handler 832 * when executing napi soft irq environment. 833 */ 834 int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget) 835 { 836 struct hfi1_packet packet; 837 838 init_packet(rcd, &packet); 839 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) 840 goto bail; 841 842 while (packet.numpkt < budget) { 843 process_rcv_packet_napi(&packet); 844 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 845 break; 846 847 process_rcv_update(0, &packet); 848 } 849 hfi1_set_rcd_head(rcd, packet.rhqoff); 850 bail: 851 finish_packet(&packet); 852 return packet.numpkt; 853 } 854 855 /* 856 * Handle receive interrupts when using the no dma rtail option. 
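 * Without DMA_RTAIL the hardware does not DMA a tail pointer to host memory,
 * so new header queue entries are detected by checking the RHF sequence
 * number against the expected value (see the use of last_rcv_seq() and
 * hfi1_seq_incr() in the handler below).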
857 */ 858 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread) 859 { 860 int last = RCV_PKT_OK; 861 struct hfi1_packet packet; 862 863 init_packet(rcd, &packet); 864 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { 865 last = RCV_PKT_DONE; 866 goto bail; 867 } 868 869 prescan_rxq(rcd, &packet); 870 871 while (last == RCV_PKT_OK) { 872 last = process_rcv_packet(&packet, thread); 873 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 874 last = RCV_PKT_DONE; 875 process_rcv_update(last, &packet); 876 } 877 process_rcv_qp_work(&packet); 878 hfi1_set_rcd_head(rcd, packet.rhqoff); 879 bail: 880 finish_packet(&packet); 881 return last; 882 } 883 884 int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread) 885 { 886 u32 hdrqtail; 887 int last = RCV_PKT_OK; 888 struct hfi1_packet packet; 889 890 init_packet(rcd, &packet); 891 hdrqtail = get_rcvhdrtail(rcd); 892 if (packet.rhqoff == hdrqtail) { 893 last = RCV_PKT_DONE; 894 goto bail; 895 } 896 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ 897 898 prescan_rxq(rcd, &packet); 899 900 while (last == RCV_PKT_OK) { 901 last = process_rcv_packet(&packet, thread); 902 if (packet.rhqoff == hdrqtail) 903 last = RCV_PKT_DONE; 904 process_rcv_update(last, &packet); 905 } 906 process_rcv_qp_work(&packet); 907 hfi1_set_rcd_head(rcd, packet.rhqoff); 908 bail: 909 finish_packet(&packet); 910 return last; 911 } 912 913 static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 914 { 915 u16 i; 916 917 /* 918 * For dynamically allocated kernel contexts (like vnic) switch 919 * interrupt handler only for that context. Otherwise, switch 920 * interrupt handler for all statically allocated kernel contexts. 921 */ 922 if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) { 923 hfi1_rcd_get(rcd); 924 hfi1_set_fast(rcd); 925 hfi1_rcd_put(rcd); 926 return; 927 } 928 929 for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) { 930 rcd = hfi1_rcd_get_by_index(dd, i); 931 if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)) 932 hfi1_set_fast(rcd); 933 hfi1_rcd_put(rcd); 934 } 935 } 936 937 void set_all_slowpath(struct hfi1_devdata *dd) 938 { 939 struct hfi1_ctxtdata *rcd; 940 u16 i; 941 942 /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */ 943 for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) { 944 rcd = hfi1_rcd_get_by_index(dd, i); 945 if (!rcd) 946 continue; 947 if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic) 948 rcd->do_interrupt = rcd->slow_handler; 949 950 hfi1_rcd_put(rcd); 951 } 952 } 953 954 static bool __set_armed_to_active(struct hfi1_packet *packet) 955 { 956 u8 etype = rhf_rcv_type(packet->rhf); 957 u8 sc = SC15_PACKET; 958 959 if (etype == RHF_RCV_TYPE_IB) { 960 struct ib_header *hdr = hfi1_get_msgheader(packet->rcd, 961 packet->rhf_addr); 962 sc = hfi1_9B_get_sc5(hdr, packet->rhf); 963 } else if (etype == RHF_RCV_TYPE_BYPASS) { 964 struct hfi1_16b_header *hdr = hfi1_get_16B_header( 965 packet->rcd, 966 packet->rhf_addr); 967 sc = hfi1_16B_get_sc(hdr); 968 } 969 if (sc != SC15_PACKET) { 970 int hwstate = driver_lstate(packet->rcd->ppd); 971 struct work_struct *lsaw = 972 &packet->rcd->ppd->linkstate_active_work; 973 974 if (hwstate != IB_PORT_ACTIVE) { 975 dd_dev_info(packet->rcd->dd, 976 "Unexpected link state %s\n", 977 opa_lstate_name(hwstate)); 978 return false; 979 } 980 981 queue_work(packet->rcd->ppd->link_wq, lsaw); 982 return true; 983 } 984 return false; 985 } 986 987 /** 988 * set_armed_to_active - the fast path for armed 
to active 989 * @packet: the packet structure 990 * 991 * Return true if packet processing needs to bail. 992 */ 993 static bool set_armed_to_active(struct hfi1_packet *packet) 994 { 995 if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED)) 996 return false; 997 return __set_armed_to_active(packet); 998 } 999 1000 /* 1001 * handle_receive_interrupt - receive a packet 1002 * @rcd: the context 1003 * 1004 * Called from interrupt handler for errors or receive interrupt. 1005 * This is the slow path interrupt handler. 1006 */ 1007 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) 1008 { 1009 struct hfi1_devdata *dd = rcd->dd; 1010 u32 hdrqtail; 1011 int needset, last = RCV_PKT_OK; 1012 struct hfi1_packet packet; 1013 int skip_pkt = 0; 1014 1015 if (!rcd->rcvhdrq) 1016 return RCV_PKT_OK; 1017 /* Control context will always use the slow path interrupt handler */ 1018 needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1; 1019 1020 init_packet(rcd, &packet); 1021 1022 if (!get_dma_rtail_setting(rcd)) { 1023 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { 1024 last = RCV_PKT_DONE; 1025 goto bail; 1026 } 1027 hdrqtail = 0; 1028 } else { 1029 hdrqtail = get_rcvhdrtail(rcd); 1030 if (packet.rhqoff == hdrqtail) { 1031 last = RCV_PKT_DONE; 1032 goto bail; 1033 } 1034 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ 1035 1036 /* 1037 * Control context can potentially receive an invalid 1038 * rhf. Drop such packets. 1039 */ 1040 if (rcd->ctxt == HFI1_CTRL_CTXT) 1041 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) 1042 skip_pkt = 1; 1043 } 1044 1045 prescan_rxq(rcd, &packet); 1046 1047 while (last == RCV_PKT_OK) { 1048 if (hfi1_need_drop(dd)) { 1049 /* On to the next packet */ 1050 packet.rhqoff += packet.rsize; 1051 packet.rhf_addr = (__le32 *)rcd->rcvhdrq + 1052 packet.rhqoff + 1053 rcd->rhf_offset; 1054 packet.rhf = rhf_to_cpu(packet.rhf_addr); 1055 1056 } else if (skip_pkt) { 1057 last = skip_rcv_packet(&packet, thread); 1058 skip_pkt = 0; 1059 } else { 1060 if (set_armed_to_active(&packet)) 1061 goto bail; 1062 last = process_rcv_packet(&packet, thread); 1063 } 1064 1065 if (!get_dma_rtail_setting(rcd)) { 1066 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 1067 last = RCV_PKT_DONE; 1068 } else { 1069 if (packet.rhqoff == hdrqtail) 1070 last = RCV_PKT_DONE; 1071 /* 1072 * Control context can potentially receive an invalid 1073 * rhf. Drop such packets. 1074 */ 1075 if (rcd->ctxt == HFI1_CTRL_CTXT) { 1076 bool lseq; 1077 1078 lseq = hfi1_seq_incr(rcd, 1079 rhf_rcv_seq(packet.rhf)); 1080 if (!last && lseq) 1081 skip_pkt = 1; 1082 } 1083 } 1084 1085 if (needset) { 1086 needset = false; 1087 set_all_fastpath(dd, rcd); 1088 } 1089 process_rcv_update(last, &packet); 1090 } 1091 1092 process_rcv_qp_work(&packet); 1093 hfi1_set_rcd_head(rcd, packet.rhqoff); 1094 1095 bail: 1096 /* 1097 * Always write head at end, and setup rcv interrupt, even 1098 * if no packets were processed. 1099 */ 1100 finish_packet(&packet); 1101 return last; 1102 } 1103 1104 /* 1105 * handle_receive_interrupt_napi_sp - receive a packet 1106 * @rcd: the context 1107 * @budget: polling budget 1108 * 1109 * Called from interrupt handler for errors or receive interrupt. 1110 * This is the slow path interrupt handler 1111 * when executing napi soft irq environment. 
1112 */ 1113 int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget) 1114 { 1115 struct hfi1_devdata *dd = rcd->dd; 1116 int last = RCV_PKT_OK; 1117 bool needset = true; 1118 struct hfi1_packet packet; 1119 1120 init_packet(rcd, &packet); 1121 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) 1122 goto bail; 1123 1124 while (last != RCV_PKT_DONE && packet.numpkt < budget) { 1125 if (hfi1_need_drop(dd)) { 1126 /* On to the next packet */ 1127 packet.rhqoff += packet.rsize; 1128 packet.rhf_addr = (__le32 *)rcd->rcvhdrq + 1129 packet.rhqoff + 1130 rcd->rhf_offset; 1131 packet.rhf = rhf_to_cpu(packet.rhf_addr); 1132 1133 } else { 1134 if (set_armed_to_active(&packet)) 1135 goto bail; 1136 process_rcv_packet_napi(&packet); 1137 } 1138 1139 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 1140 last = RCV_PKT_DONE; 1141 1142 if (needset) { 1143 needset = false; 1144 set_all_fastpath(dd, rcd); 1145 } 1146 1147 process_rcv_update(last, &packet); 1148 } 1149 1150 hfi1_set_rcd_head(rcd, packet.rhqoff); 1151 1152 bail: 1153 /* 1154 * Always write head at end, and setup rcv interrupt, even 1155 * if no packets were processed. 1156 */ 1157 finish_packet(&packet); 1158 return packet.numpkt; 1159 } 1160 1161 /* 1162 * We may discover in the interrupt that the hardware link state has 1163 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet), 1164 * and we need to update the driver's notion of the link state. We cannot 1165 * run set_link_state from interrupt context, so we queue this function on 1166 * a workqueue. 1167 * 1168 * We delay the regular interrupt processing until after the state changes 1169 * so that the link will be in the correct state by the time any application 1170 * we wake up attempts to send a reply to any message it received. 1171 * (Subsequent receive interrupts may possibly force the wakeup before we 1172 * update the link state.) 1173 * 1174 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes 1175 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues, 1176 * so we're safe from use-after-free of the rcd. 1177 */ 1178 void receive_interrupt_work(struct work_struct *work) 1179 { 1180 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 1181 linkstate_active_work); 1182 struct hfi1_devdata *dd = ppd->dd; 1183 struct hfi1_ctxtdata *rcd; 1184 u16 i; 1185 1186 /* Received non-SC15 packet implies neighbor_normal */ 1187 ppd->neighbor_normal = 1; 1188 set_link_state(ppd, HLS_UP_ACTIVE); 1189 1190 /* 1191 * Interrupt all statically allocated kernel contexts that could 1192 * have had an interrupt during auto activation. 1193 */ 1194 for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) { 1195 rcd = hfi1_rcd_get_by_index(dd, i); 1196 if (rcd) 1197 force_recv_intr(rcd); 1198 hfi1_rcd_put(rcd); 1199 } 1200 } 1201 1202 /* 1203 * Convert a given MTU size to the on-wire MAD packet enumeration. 1204 * Return -1 if the size is invalid. 
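 * The implementation below actually returns the caller-supplied
 * default_if_bad, not -1, when the size does not map to an OPA MTU enum.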
1205 */ 1206 int mtu_to_enum(u32 mtu, int default_if_bad) 1207 { 1208 switch (mtu) { 1209 case 0: return OPA_MTU_0; 1210 case 256: return OPA_MTU_256; 1211 case 512: return OPA_MTU_512; 1212 case 1024: return OPA_MTU_1024; 1213 case 2048: return OPA_MTU_2048; 1214 case 4096: return OPA_MTU_4096; 1215 case 8192: return OPA_MTU_8192; 1216 case 10240: return OPA_MTU_10240; 1217 } 1218 return default_if_bad; 1219 } 1220 1221 u16 enum_to_mtu(int mtu) 1222 { 1223 switch (mtu) { 1224 case OPA_MTU_0: return 0; 1225 case OPA_MTU_256: return 256; 1226 case OPA_MTU_512: return 512; 1227 case OPA_MTU_1024: return 1024; 1228 case OPA_MTU_2048: return 2048; 1229 case OPA_MTU_4096: return 4096; 1230 case OPA_MTU_8192: return 8192; 1231 case OPA_MTU_10240: return 10240; 1232 default: return 0xffff; 1233 } 1234 } 1235 1236 /* 1237 * set_mtu - set the MTU 1238 * @ppd: the per port data 1239 * 1240 * We can handle "any" incoming size, the issue here is whether we 1241 * need to restrict our outgoing size. We do not deal with what happens 1242 * to programs that are already running when the size changes. 1243 */ 1244 int set_mtu(struct hfi1_pportdata *ppd) 1245 { 1246 struct hfi1_devdata *dd = ppd->dd; 1247 int i, drain, ret = 0, is_up = 0; 1248 1249 ppd->ibmtu = 0; 1250 for (i = 0; i < ppd->vls_supported; i++) 1251 if (ppd->ibmtu < dd->vld[i].mtu) 1252 ppd->ibmtu = dd->vld[i].mtu; 1253 ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd); 1254 1255 mutex_lock(&ppd->hls_lock); 1256 if (ppd->host_link_state == HLS_UP_INIT || 1257 ppd->host_link_state == HLS_UP_ARMED || 1258 ppd->host_link_state == HLS_UP_ACTIVE) 1259 is_up = 1; 1260 1261 drain = !is_ax(dd) && is_up; 1262 1263 if (drain) 1264 /* 1265 * MTU is specified per-VL. To ensure that no packet gets 1266 * stuck (due, e.g., to the MTU for the packet's VL being 1267 * reduced), empty the per-VL FIFOs before adjusting MTU. 
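		 * The VLs are reopened with open_fill_data_vls() once the
		 * new MTU has been programmed.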
1268 */ 1269 ret = stop_drain_data_vls(dd); 1270 1271 if (ret) { 1272 dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n", 1273 __func__); 1274 goto err; 1275 } 1276 1277 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0); 1278 1279 if (drain) 1280 open_fill_data_vls(dd); /* reopen all VLs */ 1281 1282 err: 1283 mutex_unlock(&ppd->hls_lock); 1284 1285 return ret; 1286 } 1287 1288 int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc) 1289 { 1290 struct hfi1_devdata *dd = ppd->dd; 1291 1292 ppd->lid = lid; 1293 ppd->lmc = lmc; 1294 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0); 1295 1296 dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid); 1297 1298 return 0; 1299 } 1300 1301 void shutdown_led_override(struct hfi1_pportdata *ppd) 1302 { 1303 struct hfi1_devdata *dd = ppd->dd; 1304 1305 /* 1306 * This pairs with the memory barrier in hfi1_start_led_override to 1307 * ensure that we read the correct state of LED beaconing represented 1308 * by led_override_timer_active 1309 */ 1310 smp_rmb(); 1311 if (atomic_read(&ppd->led_override_timer_active)) { 1312 del_timer_sync(&ppd->led_override_timer); 1313 atomic_set(&ppd->led_override_timer_active, 0); 1314 /* Ensure the atomic_set is visible to all CPUs */ 1315 smp_wmb(); 1316 } 1317 1318 /* Hand control of the LED to the DC for normal operation */ 1319 write_csr(dd, DCC_CFG_LED_CNTRL, 0); 1320 } 1321 1322 static void run_led_override(struct timer_list *t) 1323 { 1324 struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer); 1325 struct hfi1_devdata *dd = ppd->dd; 1326 unsigned long timeout; 1327 int phase_idx; 1328 1329 if (!(dd->flags & HFI1_INITTED)) 1330 return; 1331 1332 phase_idx = ppd->led_override_phase & 1; 1333 1334 setextled(dd, phase_idx); 1335 1336 timeout = ppd->led_override_vals[phase_idx]; 1337 1338 /* Set up for next phase */ 1339 ppd->led_override_phase = !ppd->led_override_phase; 1340 1341 mod_timer(&ppd->led_override_timer, jiffies + timeout); 1342 } 1343 1344 /* 1345 * To have the LED blink in a particular pattern, provide timeon and timeoff 1346 * in milliseconds. 1347 * To turn off custom blinking and return to normal operation, use 1348 * shutdown_led_override() 1349 */ 1350 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, 1351 unsigned int timeoff) 1352 { 1353 if (!(ppd->dd->flags & HFI1_INITTED)) 1354 return; 1355 1356 /* Convert to jiffies for direct use in timer */ 1357 ppd->led_override_vals[0] = msecs_to_jiffies(timeoff); 1358 ppd->led_override_vals[1] = msecs_to_jiffies(timeon); 1359 1360 /* Arbitrarily start from LED on phase */ 1361 ppd->led_override_phase = 1; 1362 1363 /* 1364 * If the timer has not already been started, do so. Use a "quick" 1365 * timeout so the handler will be called soon to look at our request. 1366 */ 1367 if (!timer_pending(&ppd->led_override_timer)) { 1368 timer_setup(&ppd->led_override_timer, run_led_override, 0); 1369 ppd->led_override_timer.expires = jiffies + 1; 1370 add_timer(&ppd->led_override_timer); 1371 atomic_set(&ppd->led_override_timer_active, 1); 1372 /* Ensure the atomic_set is visible to all CPUs */ 1373 smp_wmb(); 1374 } 1375 } 1376 1377 /** 1378 * hfi1_reset_device - reset the chip if possible 1379 * @unit: the device to reset 1380 * 1381 * Whether or not reset is successful, we attempt to re-initialize the chip 1382 * (that is, much like a driver unload/reload). We clear the INITTED flag 1383 * so that the various entry points will fail until we reinitialize. 
For 1384 * now, we only allow this if no user contexts are open that use chip resources 1385 */ 1386 int hfi1_reset_device(int unit) 1387 { 1388 int ret; 1389 struct hfi1_devdata *dd = hfi1_lookup(unit); 1390 struct hfi1_pportdata *ppd; 1391 int pidx; 1392 1393 if (!dd) { 1394 ret = -ENODEV; 1395 goto bail; 1396 } 1397 1398 dd_dev_info(dd, "Reset on unit %u requested\n", unit); 1399 1400 if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) { 1401 dd_dev_info(dd, 1402 "Invalid unit number %u or not initialized or not present\n", 1403 unit); 1404 ret = -ENXIO; 1405 goto bail; 1406 } 1407 1408 /* If there are any user/vnic contexts, we cannot reset */ 1409 mutex_lock(&hfi1_mutex); 1410 if (dd->rcd) 1411 if (hfi1_stats.sps_ctxts) { 1412 mutex_unlock(&hfi1_mutex); 1413 ret = -EBUSY; 1414 goto bail; 1415 } 1416 mutex_unlock(&hfi1_mutex); 1417 1418 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1419 ppd = dd->pport + pidx; 1420 1421 shutdown_led_override(ppd); 1422 } 1423 if (dd->flags & HFI1_HAS_SEND_DMA) 1424 sdma_exit(dd); 1425 1426 hfi1_reset_cpu_counters(dd); 1427 1428 ret = hfi1_init(dd, 1); 1429 1430 if (ret) 1431 dd_dev_err(dd, 1432 "Reinitialize unit %u after reset failed with %d\n", 1433 unit, ret); 1434 else 1435 dd_dev_info(dd, "Reinitialized unit %u after resetting\n", 1436 unit); 1437 1438 bail: 1439 return ret; 1440 } 1441 1442 static inline void hfi1_setup_ib_header(struct hfi1_packet *packet) 1443 { 1444 packet->hdr = (struct hfi1_ib_message_header *) 1445 hfi1_get_msgheader(packet->rcd, 1446 packet->rhf_addr); 1447 packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr; 1448 } 1449 1450 static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet) 1451 { 1452 struct hfi1_pportdata *ppd = packet->rcd->ppd; 1453 1454 /* slid and dlid cannot be 0 */ 1455 if ((!packet->slid) || (!packet->dlid)) 1456 return -EINVAL; 1457 1458 /* Compare port lid with incoming packet dlid */ 1459 if ((!(hfi1_is_16B_mcast(packet->dlid))) && 1460 (packet->dlid != 1461 opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) { 1462 if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid) 1463 return -EINVAL; 1464 } 1465 1466 /* No multicast packets with SC15 */ 1467 if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF)) 1468 return -EINVAL; 1469 1470 /* Packets with permissive DLID always on SC15 */ 1471 if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 1472 16B)) && 1473 (packet->sc != 0xF)) 1474 return -EINVAL; 1475 1476 return 0; 1477 } 1478 1479 static int hfi1_setup_9B_packet(struct hfi1_packet *packet) 1480 { 1481 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 1482 struct ib_header *hdr; 1483 u8 lnh; 1484 1485 hfi1_setup_ib_header(packet); 1486 hdr = packet->hdr; 1487 1488 lnh = ib_get_lnh(hdr); 1489 if (lnh == HFI1_LRH_BTH) { 1490 packet->ohdr = &hdr->u.oth; 1491 packet->grh = NULL; 1492 } else if (lnh == HFI1_LRH_GRH) { 1493 u32 vtf; 1494 1495 packet->ohdr = &hdr->u.l.oth; 1496 packet->grh = &hdr->u.l.grh; 1497 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR) 1498 goto drop; 1499 vtf = be32_to_cpu(packet->grh->version_tclass_flow); 1500 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) 1501 goto drop; 1502 } else { 1503 goto drop; 1504 } 1505 1506 /* Query commonly used fields from packet header */ 1507 packet->payload = packet->ebuf; 1508 packet->opcode = ib_bth_get_opcode(packet->ohdr); 1509 packet->slid = ib_get_slid(hdr); 1510 packet->dlid = ib_get_dlid(hdr); 1511 if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && 1512 (packet->dlid != 
		      be16_to_cpu(IB_LID_PERMISSIVE))))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			be16_to_cpu(IB_MULTICAST_LID_BASE);
	packet->sl = ib_get_sl(hdr);
	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	packet->pad = ib_bth_get_pad(packet->ohdr);
	packet->extra_byte = 0;
	packet->pkey = ib_bth_get_pkey(packet->ohdr);
	packet->migrated = ib_bth_is_migration(packet->ohdr);

	return 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{
	/*
	 * Bypass packets have a different header/payload split
	 * compared to an IB packet.
	 * Current split is set such that 16 bytes of the actual
	 * header is in the header buffer and the remaining is in
	 * the eager buffer. We chose 16 since the hfi1 driver only
	 * supports 16B bypass packets and we will be able to
	 * receive the entire LRH with such a split.
	 */

	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u8 l4;

	packet->hdr = (struct hfi1_16b_header *)
			hfi1_get_16B_header(packet->rcd,
					    packet->rhf_addr);
	l4 = hfi1_16B_get_l4(packet->hdr);
	if (l4 == OPA_16B_L4_IB_LOCAL) {
		packet->ohdr = packet->ebuf;
		packet->grh = NULL;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES);
		packet->migrated = opa_bth_is_migration(packet->ohdr);
	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
		u32 vtf;
		u8 grh_len = sizeof(struct ib_grh);

		packet->ohdr = packet->ebuf + grh_len;
		packet->grh = packet->ebuf;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
		packet->migrated = opa_bth_is_migration(packet->ohdr);

		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else if (l4 == OPA_16B_L4_FM) {
		packet->mgmt = packet->ebuf;
		packet->ohdr = NULL;
		packet->grh = NULL;
		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
		packet->pad = OPA_16B_L4_FM_PAD;
		packet->hlen = OPA_16B_L4_FM_HLEN;
		packet->migrated = false;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
	packet->slid = hfi1_16B_get_slid(packet->hdr);
	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
				opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
					    16B);
	packet->sc = hfi1_16B_get_sc(packet->hdr);
	packet->sl = ibp->sc_to_sl[packet->sc];
	packet->extra_byte = SIZE_OF_LT;
	packet->pkey = hfi1_16B_get_pkey(packet->hdr);

	if (hfi1_bypass_ingress_pkt_check(packet))
		goto drop;

	return 0;
drop:
	hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

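/*
 * Illustrative sketch only (not part of the driver): the two setup helpers
 * above are selected by the RHF receive type. The real dispatch happens
 * indirectly through rcd->rhf_rcv_function_map and the handler tables at the
 * end of this file; the hypothetical helper below just shows the
 * relationship.
 *
 *	static int example_setup_packet(struct hfi1_packet *packet)
 *	{
 *		if (packet->etype == RHF_RCV_TYPE_BYPASS)
 *			return hfi1_setup_bypass_packet(packet);
 *		return hfi1_setup_9B_packet(packet);
 *	}
 */
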
static void show_eflags_errs(struct hfi1_packet *packet) 1612 { 1613 struct hfi1_ctxtdata *rcd = packet->rcd; 1614 u32 rte = rhf_rcv_type_err(packet->rhf); 1615 1616 dd_dev_err(rcd->dd, 1617 "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n", 1618 rcd->ctxt, packet->rhf, 1619 packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "", 1620 packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "", 1621 packet->rhf & RHF_DC_ERR ? "dc " : "", 1622 packet->rhf & RHF_TID_ERR ? "tid " : "", 1623 packet->rhf & RHF_LEN_ERR ? "len " : "", 1624 packet->rhf & RHF_ECC_ERR ? "ecc " : "", 1625 packet->rhf & RHF_ICRC_ERR ? "icrc " : "", 1626 rte); 1627 } 1628 1629 void handle_eflags(struct hfi1_packet *packet) 1630 { 1631 struct hfi1_ctxtdata *rcd = packet->rcd; 1632 1633 rcv_hdrerr(rcd, rcd->ppd, packet); 1634 if (rhf_err_flags(packet->rhf)) 1635 show_eflags_errs(packet); 1636 } 1637 1638 static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet) 1639 { 1640 struct hfi1_ibport *ibp; 1641 struct net_device *netdev; 1642 struct hfi1_ctxtdata *rcd = packet->rcd; 1643 struct napi_struct *napi = rcd->napi; 1644 struct sk_buff *skb; 1645 struct hfi1_netdev_rxq *rxq = container_of(napi, 1646 struct hfi1_netdev_rxq, napi); 1647 u32 extra_bytes; 1648 u32 tlen, qpnum; 1649 bool do_work, do_cnp; 1650 1651 trace_hfi1_rcvhdr(packet); 1652 1653 hfi1_setup_ib_header(packet); 1654 1655 packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth; 1656 packet->grh = NULL; 1657 1658 if (unlikely(rhf_err_flags(packet->rhf))) { 1659 handle_eflags(packet); 1660 return; 1661 } 1662 1663 qpnum = ib_bth_get_qpn(packet->ohdr); 1664 netdev = hfi1_netdev_get_data(rcd->dd, qpnum); 1665 if (!netdev) 1666 goto drop_no_nd; 1667 1668 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); 1669 trace_ctxt_rsm_hist(rcd->ctxt); 1670 1671 /* handle congestion notifications */ 1672 do_work = hfi1_may_ecn(packet); 1673 if (unlikely(do_work)) { 1674 do_cnp = (packet->opcode != IB_OPCODE_CNP); 1675 (void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp, 1676 packet, do_cnp); 1677 } 1678 1679 /* 1680 * We have split point after last byte of DETH 1681 * lets strip padding and CRC and ICRC. 1682 * tlen is whole packet len so we need to 1683 * subtract header size as well. 1684 */ 1685 tlen = packet->tlen; 1686 extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) + 1687 packet->hlen; 1688 if (unlikely(tlen < extra_bytes)) 1689 goto drop; 1690 1691 tlen -= extra_bytes; 1692 1693 skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf); 1694 if (unlikely(!skb)) 1695 goto drop; 1696 1697 dev_sw_netstats_rx_add(netdev, skb->len); 1698 1699 skb->dev = netdev; 1700 skb->pkt_type = PACKET_HOST; 1701 netif_receive_skb(skb); 1702 1703 return; 1704 1705 drop: 1706 ++netdev->stats.rx_dropped; 1707 drop_no_nd: 1708 ibp = rcd_to_iport(packet->rcd); 1709 ++ibp->rvp.n_pkt_drops; 1710 } 1711 1712 /* 1713 * The following functions are called by the interrupt handler. They are type 1714 * specific handlers for each packet type. 
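 * They are indexed by the RHF receive type through the
 * normal_rhf_rcv_functions[] and netdev_rhf_rcv_functions[] tables at the
 * end of this file.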
1715 */ 1716 static void process_receive_ib(struct hfi1_packet *packet) 1717 { 1718 if (hfi1_setup_9B_packet(packet)) 1719 return; 1720 1721 if (unlikely(hfi1_dbg_should_fault_rx(packet))) 1722 return; 1723 1724 trace_hfi1_rcvhdr(packet); 1725 1726 if (unlikely(rhf_err_flags(packet->rhf))) { 1727 handle_eflags(packet); 1728 return; 1729 } 1730 1731 hfi1_ib_rcv(packet); 1732 } 1733 1734 static void process_receive_bypass(struct hfi1_packet *packet) 1735 { 1736 struct hfi1_devdata *dd = packet->rcd->dd; 1737 1738 if (hfi1_setup_bypass_packet(packet)) 1739 return; 1740 1741 trace_hfi1_rcvhdr(packet); 1742 1743 if (unlikely(rhf_err_flags(packet->rhf))) { 1744 handle_eflags(packet); 1745 return; 1746 } 1747 1748 if (hfi1_16B_get_l2(packet->hdr) == 0x2) { 1749 hfi1_16B_rcv(packet); 1750 } else { 1751 dd_dev_err(dd, 1752 "Bypass packets other than 16B are not supported in normal operation. Dropping\n"); 1753 incr_cntr64(&dd->sw_rcv_bypass_packet_errors); 1754 if (!(dd->err_info_rcvport.status_and_code & 1755 OPA_EI_STATUS_SMASK)) { 1756 u64 *flits = packet->ebuf; 1757 1758 if (flits && !(packet->rhf & RHF_LEN_ERR)) { 1759 dd->err_info_rcvport.packet_flit1 = flits[0]; 1760 dd->err_info_rcvport.packet_flit2 = 1761 packet->tlen > sizeof(flits[0]) ? 1762 flits[1] : 0; 1763 } 1764 dd->err_info_rcvport.status_and_code |= 1765 (OPA_EI_STATUS_SMASK | BAD_L2_ERR); 1766 } 1767 } 1768 } 1769 1770 static void process_receive_error(struct hfi1_packet *packet) 1771 { 1772 /* KHdrHCRCErr -- KDETH packet with a bad HCRC */ 1773 if (unlikely( 1774 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) && 1775 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR || 1776 packet->rhf & RHF_DC_ERR))) 1777 return; 1778 1779 hfi1_setup_ib_header(packet); 1780 handle_eflags(packet); 1781 1782 if (unlikely(rhf_err_flags(packet->rhf))) 1783 dd_dev_err(packet->rcd->dd, 1784 "Unhandled error packet received. Dropping.\n"); 1785 } 1786 1787 static void kdeth_process_expected(struct hfi1_packet *packet) 1788 { 1789 hfi1_setup_9B_packet(packet); 1790 if (unlikely(hfi1_dbg_should_fault_rx(packet))) 1791 return; 1792 1793 if (unlikely(rhf_err_flags(packet->rhf))) { 1794 struct hfi1_ctxtdata *rcd = packet->rcd; 1795 1796 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) 1797 return; 1798 } 1799 1800 hfi1_kdeth_expected_rcv(packet); 1801 } 1802 1803 static void kdeth_process_eager(struct hfi1_packet *packet) 1804 { 1805 hfi1_setup_9B_packet(packet); 1806 if (unlikely(hfi1_dbg_should_fault_rx(packet))) 1807 return; 1808 1809 trace_hfi1_rcvhdr(packet); 1810 if (unlikely(rhf_err_flags(packet->rhf))) { 1811 struct hfi1_ctxtdata *rcd = packet->rcd; 1812 1813 show_eflags_errs(packet); 1814 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) 1815 return; 1816 } 1817 1818 hfi1_kdeth_eager_rcv(packet); 1819 } 1820 1821 static void process_receive_invalid(struct hfi1_packet *packet) 1822 { 1823 dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n", 1824 rhf_rcv_type(packet->rhf)); 1825 } 1826 1827 #define HFI1_RCVHDR_DUMP_MAX 5 1828 1829 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd) 1830 { 1831 struct hfi1_packet packet; 1832 struct ps_mdata mdata; 1833 int i; 1834 1835 seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n", 1836 rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd), 1837 get_dma_rtail_setting(rcd) ? 
1838 "dma_rtail" : "nodma_rtail", 1839 read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL), 1840 read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS), 1841 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & 1842 RCV_HDR_HEAD_HEAD_MASK, 1843 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL), 1844 rcd->head); 1845 1846 init_packet(rcd, &packet); 1847 init_ps_mdata(&mdata, &packet); 1848 1849 for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) { 1850 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + 1851 rcd->rhf_offset; 1852 struct ib_header *hdr; 1853 u64 rhf = rhf_to_cpu(rhf_addr); 1854 u32 etype = rhf_rcv_type(rhf), qpn; 1855 u8 opcode; 1856 u32 psn; 1857 u8 lnh; 1858 1859 if (ps_done(&mdata, rhf, rcd)) 1860 break; 1861 1862 if (ps_skip(&mdata, rhf, rcd)) 1863 goto next; 1864 1865 if (etype > RHF_RCV_TYPE_IB) 1866 goto next; 1867 1868 packet.hdr = hfi1_get_msgheader(rcd, rhf_addr); 1869 hdr = packet.hdr; 1870 1871 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 1872 1873 if (lnh == HFI1_LRH_BTH) 1874 packet.ohdr = &hdr->u.oth; 1875 else if (lnh == HFI1_LRH_GRH) 1876 packet.ohdr = &hdr->u.l.oth; 1877 else 1878 goto next; /* just in case */ 1879 1880 opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24); 1881 qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK; 1882 psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2])); 1883 1884 seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n", 1885 mdata.ps_head, opcode, qpn, psn); 1886 next: 1887 update_ps_mdata(&mdata, rcd); 1888 } 1889 } 1890 1891 const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = { 1892 [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected, 1893 [RHF_RCV_TYPE_EAGER] = kdeth_process_eager, 1894 [RHF_RCV_TYPE_IB] = process_receive_ib, 1895 [RHF_RCV_TYPE_ERROR] = process_receive_error, 1896 [RHF_RCV_TYPE_BYPASS] = process_receive_bypass, 1897 [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, 1898 [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, 1899 [RHF_RCV_TYPE_INVALID7] = process_receive_invalid, 1900 }; 1901 1902 const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = { 1903 [RHF_RCV_TYPE_EXPECTED] = process_receive_invalid, 1904 [RHF_RCV_TYPE_EAGER] = process_receive_invalid, 1905 [RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv, 1906 [RHF_RCV_TYPE_ERROR] = process_receive_error, 1907 [RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv, 1908 [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, 1909 [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, 1910 [RHF_RCV_TYPE_INVALID7] = process_receive_invalid, 1911 }; 1912