// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015-2020 Intel Corporation.
 * Copyright(c) 2021 Cornelis Networks.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
#include <linux/etherdevice.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#include "ipoib.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cornelis Omni-Path Express driver");

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
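 * For example, with MAX_PKT_RECV = 64 the threaded receive path flushes
 * deferred QP work every 64 * 4 = 256 packets.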
69 */ 70 #define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4) 71 #define EGR_HEAD_UPDATE_THRESHOLD 16 72 73 struct hfi1_ib_stats hfi1_stats; 74 75 static int hfi1_caps_set(const char *val, const struct kernel_param *kp) 76 { 77 int ret = 0; 78 unsigned long *cap_mask_ptr = (unsigned long *)kp->arg, 79 cap_mask = *cap_mask_ptr, value, diff, 80 write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) | 81 HFI1_CAP_WRITABLE_MASK); 82 83 ret = kstrtoul(val, 0, &value); 84 if (ret) { 85 pr_warn("Invalid module parameter value for 'cap_mask'\n"); 86 goto done; 87 } 88 /* Get the changed bits (except the locked bit) */ 89 diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK); 90 91 /* Remove any bits that are not allowed to change after driver load */ 92 if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) { 93 pr_warn("Ignoring non-writable capability bits %#lx\n", 94 diff & ~write_mask); 95 diff &= write_mask; 96 } 97 98 /* Mask off any reserved bits */ 99 diff &= ~HFI1_CAP_RESERVED_MASK; 100 /* Clear any previously set and changing bits */ 101 cap_mask &= ~diff; 102 /* Update the bits with the new capability */ 103 cap_mask |= (value & diff); 104 /* Check for any kernel/user restrictions */ 105 diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^ 106 ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT); 107 cap_mask &= ~diff; 108 /* Set the bitmask to the final set */ 109 *cap_mask_ptr = cap_mask; 110 done: 111 return ret; 112 } 113 114 static int hfi1_caps_get(char *buffer, const struct kernel_param *kp) 115 { 116 unsigned long cap_mask = *(unsigned long *)kp->arg; 117 118 cap_mask &= ~HFI1_CAP_LOCKED_SMASK; 119 cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT); 120 121 return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask); 122 } 123 124 struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi) 125 { 126 struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi); 127 struct hfi1_devdata *dd = container_of(ibdev, 128 struct hfi1_devdata, verbs_dev); 129 return dd->pcidev; 130 } 131 132 /* 133 * Return count of units with at least one port ACTIVE. 134 */ 135 int hfi1_count_active_units(void) 136 { 137 struct hfi1_devdata *dd; 138 struct hfi1_pportdata *ppd; 139 unsigned long index, flags; 140 int pidx, nunits_active = 0; 141 142 xa_lock_irqsave(&hfi1_dev_table, flags); 143 xa_for_each(&hfi1_dev_table, index, dd) { 144 if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1) 145 continue; 146 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 147 ppd = dd->pport + pidx; 148 if (ppd->lid && ppd->linkup) { 149 nunits_active++; 150 break; 151 } 152 } 153 } 154 xa_unlock_irqrestore(&hfi1_dev_table, flags); 155 return nunits_active; 156 } 157 158 /* 159 * Get address of eager buffer from it's index (allocated in chunks, not 160 * contiguous). 
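 * The RHF supplies both the chunk index (rhf_egr_index()) and the block
 * offset within that chunk (rhf_egr_buf_offset()); the returned address is
 * rcvtids[idx].addr + offset * RCV_BUF_BLOCK_SIZE.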
161 */ 162 static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf, 163 u8 *update) 164 { 165 u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf); 166 167 *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset; 168 return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) + 169 (offset * RCV_BUF_BLOCK_SIZE)); 170 } 171 172 static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd, 173 __le32 *rhf_addr) 174 { 175 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr)); 176 177 return (void *)(rhf_addr - rcd->rhf_offset + offset); 178 } 179 180 static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd, 181 __le32 *rhf_addr) 182 { 183 return (struct ib_header *)hfi1_get_header(rcd, rhf_addr); 184 } 185 186 static inline struct hfi1_16b_header 187 *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd, 188 __le32 *rhf_addr) 189 { 190 return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr); 191 } 192 193 /* 194 * Validate and encode the a given RcvArray Buffer size. 195 * The function will check whether the given size falls within 196 * allowed size ranges for the respective type and, optionally, 197 * return the proper encoding. 198 */ 199 int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded) 200 { 201 if (unlikely(!PAGE_ALIGNED(size))) 202 return 0; 203 if (unlikely(size < MIN_EAGER_BUFFER)) 204 return 0; 205 if (size > 206 (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER)) 207 return 0; 208 if (encoded) 209 *encoded = ilog2(size / PAGE_SIZE) + 1; 210 return 1; 211 } 212 213 static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, 214 struct hfi1_packet *packet) 215 { 216 struct ib_header *rhdr = packet->hdr; 217 u32 rte = rhf_rcv_type_err(packet->rhf); 218 u32 mlid_base; 219 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 220 struct hfi1_devdata *dd = ppd->dd; 221 struct hfi1_ibdev *verbs_dev = &dd->verbs_dev; 222 struct rvt_dev_info *rdi = &verbs_dev->rdi; 223 224 if ((packet->rhf & RHF_DC_ERR) && 225 hfi1_dbg_fault_suppress_err(verbs_dev)) 226 return; 227 228 if (packet->rhf & RHF_ICRC_ERR) 229 return; 230 231 if (packet->etype == RHF_RCV_TYPE_BYPASS) { 232 goto drop; 233 } else { 234 u8 lnh = ib_get_lnh(rhdr); 235 236 mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE); 237 if (lnh == HFI1_LRH_BTH) { 238 packet->ohdr = &rhdr->u.oth; 239 } else if (lnh == HFI1_LRH_GRH) { 240 packet->ohdr = &rhdr->u.l.oth; 241 packet->grh = &rhdr->u.l.grh; 242 } else { 243 goto drop; 244 } 245 } 246 247 if (packet->rhf & RHF_TID_ERR) { 248 /* For TIDERR and RC QPs preemptively schedule a NAK */ 249 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */ 250 u32 dlid = ib_get_dlid(rhdr); 251 u32 qp_num; 252 253 /* Sanity check packet */ 254 if (tlen < 24) 255 goto drop; 256 257 /* Check for GRH */ 258 if (packet->grh) { 259 u32 vtf; 260 struct ib_grh *grh = packet->grh; 261 262 if (grh->next_hdr != IB_GRH_NEXT_HDR) 263 goto drop; 264 vtf = be32_to_cpu(grh->version_tclass_flow); 265 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) 266 goto drop; 267 } 268 269 /* Get the destination QP number. */ 270 qp_num = ib_bth_get_qpn(packet->ohdr); 271 if (dlid < mlid_base) { 272 struct rvt_qp *qp; 273 unsigned long flags; 274 275 rcu_read_lock(); 276 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); 277 if (!qp) { 278 rcu_read_unlock(); 279 goto drop; 280 } 281 282 /* 283 * Handle only RC QPs - for other QP types drop error 284 * packet. 285 */ 286 spin_lock_irqsave(&qp->r_lock, flags); 287 288 /* Check for valid receive state. 
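			 * A QP that cannot receive only has its drop counter
			 * bumped here; the RC header-error handling below
			 * still runs.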
			 */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, it has already been
 * determined whether processing of those bits should be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
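 *
 * Return: true if the packet carried a FECN that was not ignored (the
 * function evaluates !ignore_fecn && fecn), i.e. the caller still needs to
 * act on the FECN.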
406 */ 407 bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, 408 bool prescan) 409 { 410 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 411 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 412 struct ib_other_headers *ohdr = pkt->ohdr; 413 struct ib_grh *grh = pkt->grh; 414 u32 rqpn = 0; 415 u16 pkey; 416 u32 rlid, slid, dlid = 0; 417 u8 hdr_type, sc, svc_type, opcode; 418 bool is_mcast = false, ignore_fecn = false, do_cnp = false, 419 fecn, becn; 420 421 /* can be called from prescan */ 422 if (pkt->etype == RHF_RCV_TYPE_BYPASS) { 423 pkey = hfi1_16B_get_pkey(pkt->hdr); 424 sc = hfi1_16B_get_sc(pkt->hdr); 425 dlid = hfi1_16B_get_dlid(pkt->hdr); 426 slid = hfi1_16B_get_slid(pkt->hdr); 427 is_mcast = hfi1_is_16B_mcast(dlid); 428 opcode = ib_bth_get_opcode(ohdr); 429 hdr_type = HFI1_PKT_TYPE_16B; 430 fecn = hfi1_16B_get_fecn(pkt->hdr); 431 becn = hfi1_16B_get_becn(pkt->hdr); 432 } else { 433 pkey = ib_bth_get_pkey(ohdr); 434 sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf); 435 dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) : 436 ppd->lid; 437 slid = ib_get_slid(pkt->hdr); 438 is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && 439 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); 440 opcode = ib_bth_get_opcode(ohdr); 441 hdr_type = HFI1_PKT_TYPE_9B; 442 fecn = ib_bth_get_fecn(ohdr); 443 becn = ib_bth_get_becn(ohdr); 444 } 445 446 switch (qp->ibqp.qp_type) { 447 case IB_QPT_UD: 448 rlid = slid; 449 rqpn = ib_get_sqpn(pkt->ohdr); 450 svc_type = IB_CC_SVCTYPE_UD; 451 break; 452 case IB_QPT_SMI: 453 case IB_QPT_GSI: 454 rlid = slid; 455 rqpn = ib_get_sqpn(pkt->ohdr); 456 svc_type = IB_CC_SVCTYPE_UD; 457 break; 458 case IB_QPT_UC: 459 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); 460 rqpn = qp->remote_qpn; 461 svc_type = IB_CC_SVCTYPE_UC; 462 break; 463 case IB_QPT_RC: 464 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr); 465 rqpn = qp->remote_qpn; 466 svc_type = IB_CC_SVCTYPE_RC; 467 break; 468 default: 469 return false; 470 } 471 472 ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) || 473 (opcode == IB_OPCODE_RC_ACKNOWLEDGE); 474 /* 475 * ACKNOWLEDGE packets do not get a CNP but this will be 476 * guarded by ignore_fecn above. 
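	 * do_cnp below still matches the RC response/ACK opcode range and
	 * the TID RDMA READ_RESP/ACK opcodes; it is ignore_fecn that
	 * suppresses the CNP for plain acknowledgements.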
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}

struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
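 * Only 9B (RHF_RCV_TYPE_IB) entries are examined; entries of any other
 * receive type are skipped.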
570 */ 571 #define prescan_rxq(rcd, packet) \ 572 do { \ 573 if (rcd->ppd->cc_prescan) \ 574 __prescan_rxq(packet); \ 575 } while (0) 576 static void __prescan_rxq(struct hfi1_packet *packet) 577 { 578 struct hfi1_ctxtdata *rcd = packet->rcd; 579 struct ps_mdata mdata; 580 581 init_ps_mdata(&mdata, packet); 582 583 while (1) { 584 struct hfi1_ibport *ibp = rcd_to_iport(rcd); 585 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + 586 packet->rcd->rhf_offset; 587 struct rvt_qp *qp; 588 struct ib_header *hdr; 589 struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi; 590 u64 rhf = rhf_to_cpu(rhf_addr); 591 u32 etype = rhf_rcv_type(rhf), qpn, bth1; 592 u8 lnh; 593 594 if (ps_done(&mdata, rhf, rcd)) 595 break; 596 597 if (ps_skip(&mdata, rhf, rcd)) 598 goto next; 599 600 if (etype != RHF_RCV_TYPE_IB) 601 goto next; 602 603 packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr); 604 hdr = packet->hdr; 605 lnh = ib_get_lnh(hdr); 606 607 if (lnh == HFI1_LRH_BTH) { 608 packet->ohdr = &hdr->u.oth; 609 packet->grh = NULL; 610 } else if (lnh == HFI1_LRH_GRH) { 611 packet->ohdr = &hdr->u.l.oth; 612 packet->grh = &hdr->u.l.grh; 613 } else { 614 goto next; /* just in case */ 615 } 616 617 if (!hfi1_may_ecn(packet)) 618 goto next; 619 620 bth1 = be32_to_cpu(packet->ohdr->bth[1]); 621 qpn = bth1 & RVT_QPN_MASK; 622 rcu_read_lock(); 623 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn); 624 625 if (!qp) { 626 rcu_read_unlock(); 627 goto next; 628 } 629 630 hfi1_process_ecn_slowpath(qp, packet, true); 631 rcu_read_unlock(); 632 633 /* turn off BECN, FECN */ 634 bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK); 635 packet->ohdr->bth[1] = cpu_to_be32(bth1); 636 next: 637 update_ps_mdata(&mdata, rcd); 638 } 639 } 640 641 static void process_rcv_qp_work(struct hfi1_packet *packet) 642 { 643 struct rvt_qp *qp, *nqp; 644 struct hfi1_ctxtdata *rcd = packet->rcd; 645 646 /* 647 * Iterate over all QPs waiting to respond. 648 * The list won't change since the IRQ is only run on one CPU. 
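	 * For each QP on the list: remove it from qp_wait_list, send any
	 * pending RC NAK, reschedule a blocked send if the QP state still
	 * allows it, and drop the reference taken when the QP was queued.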
649 */ 650 list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { 651 list_del_init(&qp->rspwait); 652 if (qp->r_flags & RVT_R_RSP_NAK) { 653 qp->r_flags &= ~RVT_R_RSP_NAK; 654 packet->qp = qp; 655 hfi1_send_rc_ack(packet, 0); 656 } 657 if (qp->r_flags & RVT_R_RSP_SEND) { 658 unsigned long flags; 659 660 qp->r_flags &= ~RVT_R_RSP_SEND; 661 spin_lock_irqsave(&qp->s_lock, flags); 662 if (ib_rvt_state_ops[qp->state] & 663 RVT_PROCESS_OR_FLUSH_SEND) 664 hfi1_schedule_send(qp); 665 spin_unlock_irqrestore(&qp->s_lock, flags); 666 } 667 rvt_put_qp(qp); 668 } 669 } 670 671 static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread) 672 { 673 if (thread) { 674 if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0) 675 /* allow defered processing */ 676 process_rcv_qp_work(packet); 677 cond_resched(); 678 return RCV_PKT_OK; 679 } else { 680 this_cpu_inc(*packet->rcd->dd->rcv_limit); 681 return RCV_PKT_LIMIT; 682 } 683 } 684 685 static inline int check_max_packet(struct hfi1_packet *packet, int thread) 686 { 687 int ret = RCV_PKT_OK; 688 689 if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) 690 ret = max_packet_exceeded(packet, thread); 691 return ret; 692 } 693 694 static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread) 695 { 696 int ret; 697 698 packet->rcd->dd->ctx0_seq_drop++; 699 /* Set up for the next packet */ 700 packet->rhqoff += packet->rsize; 701 if (packet->rhqoff >= packet->maxcnt) 702 packet->rhqoff = 0; 703 704 packet->numpkt++; 705 ret = check_max_packet(packet, thread); 706 707 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + 708 packet->rcd->rhf_offset; 709 packet->rhf = rhf_to_cpu(packet->rhf_addr); 710 711 return ret; 712 } 713 714 static void process_rcv_packet_napi(struct hfi1_packet *packet) 715 { 716 packet->etype = rhf_rcv_type(packet->rhf); 717 718 /* total length */ 719 packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */ 720 /* retrieve eager buffer details */ 721 packet->etail = rhf_egr_index(packet->rhf); 722 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf, 723 &packet->updegr); 724 /* 725 * Prefetch the contents of the eager buffer. It is 726 * OK to send a negative length to prefetch_range(). 727 * The +2 is the size of the RHF. 728 */ 729 prefetch_range(packet->ebuf, 730 packet->tlen - ((packet->rcd->rcvhdrqentsize - 731 (rhf_hdrq_offset(packet->rhf) 732 + 2)) * 4)); 733 734 packet->rcd->rhf_rcv_function_map[packet->etype](packet); 735 packet->numpkt++; 736 737 /* Set up for the next packet */ 738 packet->rhqoff += packet->rsize; 739 if (packet->rhqoff >= packet->maxcnt) 740 packet->rhqoff = 0; 741 742 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + 743 packet->rcd->rhf_offset; 744 packet->rhf = rhf_to_cpu(packet->rhf_addr); 745 } 746 747 static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) 748 { 749 int ret; 750 751 packet->etype = rhf_rcv_type(packet->rhf); 752 753 /* total length */ 754 packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */ 755 /* retrieve eager buffer details */ 756 packet->ebuf = NULL; 757 if (rhf_use_egr_bfr(packet->rhf)) { 758 packet->etail = rhf_egr_index(packet->rhf); 759 packet->ebuf = get_egrbuf(packet->rcd, packet->rhf, 760 &packet->updegr); 761 /* 762 * Prefetch the contents of the eager buffer. It is 763 * OK to send a negative length to prefetch_range(). 764 * The +2 is the size of the RHF. 
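		 * Header sizes here are counted in 32-bit words; the multiply
		 * by 4 below converts the result to bytes.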
765 */ 766 prefetch_range(packet->ebuf, 767 packet->tlen - ((get_hdrqentsize(packet->rcd) - 768 (rhf_hdrq_offset(packet->rhf) 769 + 2)) * 4)); 770 } 771 772 /* 773 * Call a type specific handler for the packet. We 774 * should be able to trust that etype won't be beyond 775 * the range of valid indexes. If so something is really 776 * wrong and we can probably just let things come 777 * crashing down. There is no need to eat another 778 * comparison in this performance critical code. 779 */ 780 packet->rcd->rhf_rcv_function_map[packet->etype](packet); 781 packet->numpkt++; 782 783 /* Set up for the next packet */ 784 packet->rhqoff += packet->rsize; 785 if (packet->rhqoff >= packet->maxcnt) 786 packet->rhqoff = 0; 787 788 ret = check_max_packet(packet, thread); 789 790 packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + 791 packet->rcd->rhf_offset; 792 packet->rhf = rhf_to_cpu(packet->rhf_addr); 793 794 return ret; 795 } 796 797 static inline void process_rcv_update(int last, struct hfi1_packet *packet) 798 { 799 /* 800 * Update head regs etc., every 16 packets, if not last pkt, 801 * to help prevent rcvhdrq overflows, when many packets 802 * are processed and queue is nearly full. 803 * Don't request an interrupt for intermediate updates. 804 */ 805 if (!last && !(packet->numpkt & 0xf)) { 806 update_usrhead(packet->rcd, packet->rhqoff, packet->updegr, 807 packet->etail, 0, 0); 808 packet->updegr = 0; 809 } 810 packet->grh = NULL; 811 } 812 813 static inline void finish_packet(struct hfi1_packet *packet) 814 { 815 /* 816 * Nothing we need to free for the packet. 817 * 818 * The only thing we need to do is a final update and call for an 819 * interrupt 820 */ 821 update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr, 822 packet->etail, rcv_intr_dynamic, packet->numpkt); 823 } 824 825 /* 826 * handle_receive_interrupt_napi_fp - receive a packet 827 * @rcd: the context 828 * @budget: polling budget 829 * 830 * Called from interrupt handler for receive interrupt. 831 * This is the fast path interrupt handler 832 * when executing napi soft irq environment. 833 */ 834 int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget) 835 { 836 struct hfi1_packet packet; 837 838 init_packet(rcd, &packet); 839 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) 840 goto bail; 841 842 while (packet.numpkt < budget) { 843 process_rcv_packet_napi(&packet); 844 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 845 break; 846 847 process_rcv_update(0, &packet); 848 } 849 hfi1_set_rcd_head(rcd, packet.rhqoff); 850 bail: 851 finish_packet(&packet); 852 return packet.numpkt; 853 } 854 855 /* 856 * Handle receive interrupts when using the no dma rtail option. 
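 * Without DMA_RTAIL there is no DMA'ed tail pointer to consult, so progress
 * is detected by checking the RHF sequence number of each entry.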
857 */ 858 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread) 859 { 860 int last = RCV_PKT_OK; 861 struct hfi1_packet packet; 862 863 init_packet(rcd, &packet); 864 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { 865 last = RCV_PKT_DONE; 866 goto bail; 867 } 868 869 prescan_rxq(rcd, &packet); 870 871 while (last == RCV_PKT_OK) { 872 last = process_rcv_packet(&packet, thread); 873 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 874 last = RCV_PKT_DONE; 875 process_rcv_update(last, &packet); 876 } 877 process_rcv_qp_work(&packet); 878 hfi1_set_rcd_head(rcd, packet.rhqoff); 879 bail: 880 finish_packet(&packet); 881 return last; 882 } 883 884 int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread) 885 { 886 u32 hdrqtail; 887 int last = RCV_PKT_OK; 888 struct hfi1_packet packet; 889 890 init_packet(rcd, &packet); 891 hdrqtail = get_rcvhdrtail(rcd); 892 if (packet.rhqoff == hdrqtail) { 893 last = RCV_PKT_DONE; 894 goto bail; 895 } 896 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ 897 898 prescan_rxq(rcd, &packet); 899 900 while (last == RCV_PKT_OK) { 901 last = process_rcv_packet(&packet, thread); 902 if (packet.rhqoff == hdrqtail) 903 last = RCV_PKT_DONE; 904 process_rcv_update(last, &packet); 905 } 906 process_rcv_qp_work(&packet); 907 hfi1_set_rcd_head(rcd, packet.rhqoff); 908 bail: 909 finish_packet(&packet); 910 return last; 911 } 912 913 static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 914 { 915 u16 i; 916 917 /* 918 * For dynamically allocated kernel contexts (like vnic) switch 919 * interrupt handler only for that context. Otherwise, switch 920 * interrupt handler for all statically allocated kernel contexts. 921 */ 922 if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) { 923 hfi1_rcd_get(rcd); 924 hfi1_set_fast(rcd); 925 hfi1_rcd_put(rcd); 926 return; 927 } 928 929 for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) { 930 rcd = hfi1_rcd_get_by_index(dd, i); 931 if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)) 932 hfi1_set_fast(rcd); 933 hfi1_rcd_put(rcd); 934 } 935 } 936 937 void set_all_slowpath(struct hfi1_devdata *dd) 938 { 939 struct hfi1_ctxtdata *rcd; 940 u16 i; 941 942 /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */ 943 for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) { 944 rcd = hfi1_rcd_get_by_index(dd, i); 945 if (!rcd) 946 continue; 947 if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic) 948 rcd->do_interrupt = rcd->slow_handler; 949 950 hfi1_rcd_put(rcd); 951 } 952 } 953 954 static bool __set_armed_to_active(struct hfi1_packet *packet) 955 { 956 u8 etype = rhf_rcv_type(packet->rhf); 957 u8 sc = SC15_PACKET; 958 959 if (etype == RHF_RCV_TYPE_IB) { 960 struct ib_header *hdr = hfi1_get_msgheader(packet->rcd, 961 packet->rhf_addr); 962 sc = hfi1_9B_get_sc5(hdr, packet->rhf); 963 } else if (etype == RHF_RCV_TYPE_BYPASS) { 964 struct hfi1_16b_header *hdr = hfi1_get_16B_header( 965 packet->rcd, 966 packet->rhf_addr); 967 sc = hfi1_16B_get_sc(hdr); 968 } 969 if (sc != SC15_PACKET) { 970 int hwstate = driver_lstate(packet->rcd->ppd); 971 struct work_struct *lsaw = 972 &packet->rcd->ppd->linkstate_active_work; 973 974 if (hwstate != IB_PORT_ACTIVE) { 975 dd_dev_info(packet->rcd->dd, 976 "Unexpected link state %s\n", 977 opa_lstate_name(hwstate)); 978 return false; 979 } 980 981 queue_work(packet->rcd->ppd->link_wq, lsaw); 982 return true; 983 } 984 return false; 985 } 986 987 /** 988 * set_armed_to_active - the fast path for armed 
to active 989 * @packet: the packet structure 990 * 991 * Return true if packet processing needs to bail. 992 */ 993 static bool set_armed_to_active(struct hfi1_packet *packet) 994 { 995 if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED)) 996 return false; 997 return __set_armed_to_active(packet); 998 } 999 1000 /* 1001 * handle_receive_interrupt - receive a packet 1002 * @rcd: the context 1003 * 1004 * Called from interrupt handler for errors or receive interrupt. 1005 * This is the slow path interrupt handler. 1006 */ 1007 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) 1008 { 1009 struct hfi1_devdata *dd = rcd->dd; 1010 u32 hdrqtail; 1011 int needset, last = RCV_PKT_OK; 1012 struct hfi1_packet packet; 1013 int skip_pkt = 0; 1014 1015 /* Control context will always use the slow path interrupt handler */ 1016 needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1; 1017 1018 init_packet(rcd, &packet); 1019 1020 if (!get_dma_rtail_setting(rcd)) { 1021 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) { 1022 last = RCV_PKT_DONE; 1023 goto bail; 1024 } 1025 hdrqtail = 0; 1026 } else { 1027 hdrqtail = get_rcvhdrtail(rcd); 1028 if (packet.rhqoff == hdrqtail) { 1029 last = RCV_PKT_DONE; 1030 goto bail; 1031 } 1032 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ 1033 1034 /* 1035 * Control context can potentially receive an invalid 1036 * rhf. Drop such packets. 1037 */ 1038 if (rcd->ctxt == HFI1_CTRL_CTXT) 1039 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) 1040 skip_pkt = 1; 1041 } 1042 1043 prescan_rxq(rcd, &packet); 1044 1045 while (last == RCV_PKT_OK) { 1046 if (hfi1_need_drop(dd)) { 1047 /* On to the next packet */ 1048 packet.rhqoff += packet.rsize; 1049 packet.rhf_addr = (__le32 *)rcd->rcvhdrq + 1050 packet.rhqoff + 1051 rcd->rhf_offset; 1052 packet.rhf = rhf_to_cpu(packet.rhf_addr); 1053 1054 } else if (skip_pkt) { 1055 last = skip_rcv_packet(&packet, thread); 1056 skip_pkt = 0; 1057 } else { 1058 if (set_armed_to_active(&packet)) 1059 goto bail; 1060 last = process_rcv_packet(&packet, thread); 1061 } 1062 1063 if (!get_dma_rtail_setting(rcd)) { 1064 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 1065 last = RCV_PKT_DONE; 1066 } else { 1067 if (packet.rhqoff == hdrqtail) 1068 last = RCV_PKT_DONE; 1069 /* 1070 * Control context can potentially receive an invalid 1071 * rhf. Drop such packets. 1072 */ 1073 if (rcd->ctxt == HFI1_CTRL_CTXT) { 1074 bool lseq; 1075 1076 lseq = hfi1_seq_incr(rcd, 1077 rhf_rcv_seq(packet.rhf)); 1078 if (!last && lseq) 1079 skip_pkt = 1; 1080 } 1081 } 1082 1083 if (needset) { 1084 needset = false; 1085 set_all_fastpath(dd, rcd); 1086 } 1087 process_rcv_update(last, &packet); 1088 } 1089 1090 process_rcv_qp_work(&packet); 1091 hfi1_set_rcd_head(rcd, packet.rhqoff); 1092 1093 bail: 1094 /* 1095 * Always write head at end, and setup rcv interrupt, even 1096 * if no packets were processed. 1097 */ 1098 finish_packet(&packet); 1099 return last; 1100 } 1101 1102 /* 1103 * handle_receive_interrupt_napi_sp - receive a packet 1104 * @rcd: the context 1105 * @budget: polling budget 1106 * 1107 * Called from interrupt handler for errors or receive interrupt. 1108 * This is the slow path interrupt handler 1109 * when executing napi soft irq environment. 
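 * Packets are consumed until the RHF sequence breaks or the NAPI budget
 * is exhausted; the number of packets processed is returned so the NAPI
 * core can decide whether to poll again.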
 */
int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_devdata *dd = rcd->dd;
	int last = RCV_PKT_OK;
	bool needset = true;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (last != RCV_PKT_DONE && packet.numpkt < budget) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			process_rcv_packet_napi(&packet);
		}

		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;

		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}

		process_rcv_update(last, &packet);
	}

	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return packet.numpkt;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state. We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return default_if_bad if the size is not recognized.
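 * For example, mtu_to_enum(4096, OPA_MTU_2048) yields OPA_MTU_4096, while
 * an unsupported size such as 3000 yields the supplied default.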
1203 */ 1204 int mtu_to_enum(u32 mtu, int default_if_bad) 1205 { 1206 switch (mtu) { 1207 case 0: return OPA_MTU_0; 1208 case 256: return OPA_MTU_256; 1209 case 512: return OPA_MTU_512; 1210 case 1024: return OPA_MTU_1024; 1211 case 2048: return OPA_MTU_2048; 1212 case 4096: return OPA_MTU_4096; 1213 case 8192: return OPA_MTU_8192; 1214 case 10240: return OPA_MTU_10240; 1215 } 1216 return default_if_bad; 1217 } 1218 1219 u16 enum_to_mtu(int mtu) 1220 { 1221 switch (mtu) { 1222 case OPA_MTU_0: return 0; 1223 case OPA_MTU_256: return 256; 1224 case OPA_MTU_512: return 512; 1225 case OPA_MTU_1024: return 1024; 1226 case OPA_MTU_2048: return 2048; 1227 case OPA_MTU_4096: return 4096; 1228 case OPA_MTU_8192: return 8192; 1229 case OPA_MTU_10240: return 10240; 1230 default: return 0xffff; 1231 } 1232 } 1233 1234 /* 1235 * set_mtu - set the MTU 1236 * @ppd: the per port data 1237 * 1238 * We can handle "any" incoming size, the issue here is whether we 1239 * need to restrict our outgoing size. We do not deal with what happens 1240 * to programs that are already running when the size changes. 1241 */ 1242 int set_mtu(struct hfi1_pportdata *ppd) 1243 { 1244 struct hfi1_devdata *dd = ppd->dd; 1245 int i, drain, ret = 0, is_up = 0; 1246 1247 ppd->ibmtu = 0; 1248 for (i = 0; i < ppd->vls_supported; i++) 1249 if (ppd->ibmtu < dd->vld[i].mtu) 1250 ppd->ibmtu = dd->vld[i].mtu; 1251 ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd); 1252 1253 mutex_lock(&ppd->hls_lock); 1254 if (ppd->host_link_state == HLS_UP_INIT || 1255 ppd->host_link_state == HLS_UP_ARMED || 1256 ppd->host_link_state == HLS_UP_ACTIVE) 1257 is_up = 1; 1258 1259 drain = !is_ax(dd) && is_up; 1260 1261 if (drain) 1262 /* 1263 * MTU is specified per-VL. To ensure that no packet gets 1264 * stuck (due, e.g., to the MTU for the packet's VL being 1265 * reduced), empty the per-VL FIFOs before adjusting MTU. 
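		 * The VLs are reopened below with open_fill_data_vls() once
		 * the new MTU has been programmed.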
1266 */ 1267 ret = stop_drain_data_vls(dd); 1268 1269 if (ret) { 1270 dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n", 1271 __func__); 1272 goto err; 1273 } 1274 1275 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0); 1276 1277 if (drain) 1278 open_fill_data_vls(dd); /* reopen all VLs */ 1279 1280 err: 1281 mutex_unlock(&ppd->hls_lock); 1282 1283 return ret; 1284 } 1285 1286 int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc) 1287 { 1288 struct hfi1_devdata *dd = ppd->dd; 1289 1290 ppd->lid = lid; 1291 ppd->lmc = lmc; 1292 hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0); 1293 1294 dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid); 1295 1296 return 0; 1297 } 1298 1299 void shutdown_led_override(struct hfi1_pportdata *ppd) 1300 { 1301 struct hfi1_devdata *dd = ppd->dd; 1302 1303 /* 1304 * This pairs with the memory barrier in hfi1_start_led_override to 1305 * ensure that we read the correct state of LED beaconing represented 1306 * by led_override_timer_active 1307 */ 1308 smp_rmb(); 1309 if (atomic_read(&ppd->led_override_timer_active)) { 1310 del_timer_sync(&ppd->led_override_timer); 1311 atomic_set(&ppd->led_override_timer_active, 0); 1312 /* Ensure the atomic_set is visible to all CPUs */ 1313 smp_wmb(); 1314 } 1315 1316 /* Hand control of the LED to the DC for normal operation */ 1317 write_csr(dd, DCC_CFG_LED_CNTRL, 0); 1318 } 1319 1320 static void run_led_override(struct timer_list *t) 1321 { 1322 struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer); 1323 struct hfi1_devdata *dd = ppd->dd; 1324 unsigned long timeout; 1325 int phase_idx; 1326 1327 if (!(dd->flags & HFI1_INITTED)) 1328 return; 1329 1330 phase_idx = ppd->led_override_phase & 1; 1331 1332 setextled(dd, phase_idx); 1333 1334 timeout = ppd->led_override_vals[phase_idx]; 1335 1336 /* Set up for next phase */ 1337 ppd->led_override_phase = !ppd->led_override_phase; 1338 1339 mod_timer(&ppd->led_override_timer, jiffies + timeout); 1340 } 1341 1342 /* 1343 * To have the LED blink in a particular pattern, provide timeon and timeoff 1344 * in milliseconds. 1345 * To turn off custom blinking and return to normal operation, use 1346 * shutdown_led_override() 1347 */ 1348 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, 1349 unsigned int timeoff) 1350 { 1351 if (!(ppd->dd->flags & HFI1_INITTED)) 1352 return; 1353 1354 /* Convert to jiffies for direct use in timer */ 1355 ppd->led_override_vals[0] = msecs_to_jiffies(timeoff); 1356 ppd->led_override_vals[1] = msecs_to_jiffies(timeon); 1357 1358 /* Arbitrarily start from LED on phase */ 1359 ppd->led_override_phase = 1; 1360 1361 /* 1362 * If the timer has not already been started, do so. Use a "quick" 1363 * timeout so the handler will be called soon to look at our request. 1364 */ 1365 if (!timer_pending(&ppd->led_override_timer)) { 1366 timer_setup(&ppd->led_override_timer, run_led_override, 0); 1367 ppd->led_override_timer.expires = jiffies + 1; 1368 add_timer(&ppd->led_override_timer); 1369 atomic_set(&ppd->led_override_timer_active, 1); 1370 /* Ensure the atomic_set is visible to all CPUs */ 1371 smp_wmb(); 1372 } 1373 } 1374 1375 /** 1376 * hfi1_reset_device - reset the chip if possible 1377 * @unit: the device to reset 1378 * 1379 * Whether or not reset is successful, we attempt to re-initialize the chip 1380 * (that is, much like a driver unload/reload). We clear the INITTED flag 1381 * so that the various entry points will fail until we reinitialize. 
For 1382 * now, we only allow this if no user contexts are open that use chip resources 1383 */ 1384 int hfi1_reset_device(int unit) 1385 { 1386 int ret; 1387 struct hfi1_devdata *dd = hfi1_lookup(unit); 1388 struct hfi1_pportdata *ppd; 1389 int pidx; 1390 1391 if (!dd) { 1392 ret = -ENODEV; 1393 goto bail; 1394 } 1395 1396 dd_dev_info(dd, "Reset on unit %u requested\n", unit); 1397 1398 if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) { 1399 dd_dev_info(dd, 1400 "Invalid unit number %u or not initialized or not present\n", 1401 unit); 1402 ret = -ENXIO; 1403 goto bail; 1404 } 1405 1406 /* If there are any user/vnic contexts, we cannot reset */ 1407 mutex_lock(&hfi1_mutex); 1408 if (dd->rcd) 1409 if (hfi1_stats.sps_ctxts) { 1410 mutex_unlock(&hfi1_mutex); 1411 ret = -EBUSY; 1412 goto bail; 1413 } 1414 mutex_unlock(&hfi1_mutex); 1415 1416 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 1417 ppd = dd->pport + pidx; 1418 1419 shutdown_led_override(ppd); 1420 } 1421 if (dd->flags & HFI1_HAS_SEND_DMA) 1422 sdma_exit(dd); 1423 1424 hfi1_reset_cpu_counters(dd); 1425 1426 ret = hfi1_init(dd, 1); 1427 1428 if (ret) 1429 dd_dev_err(dd, 1430 "Reinitialize unit %u after reset failed with %d\n", 1431 unit, ret); 1432 else 1433 dd_dev_info(dd, "Reinitialized unit %u after resetting\n", 1434 unit); 1435 1436 bail: 1437 return ret; 1438 } 1439 1440 static inline void hfi1_setup_ib_header(struct hfi1_packet *packet) 1441 { 1442 packet->hdr = (struct hfi1_ib_message_header *) 1443 hfi1_get_msgheader(packet->rcd, 1444 packet->rhf_addr); 1445 packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr; 1446 } 1447 1448 static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet) 1449 { 1450 struct hfi1_pportdata *ppd = packet->rcd->ppd; 1451 1452 /* slid and dlid cannot be 0 */ 1453 if ((!packet->slid) || (!packet->dlid)) 1454 return -EINVAL; 1455 1456 /* Compare port lid with incoming packet dlid */ 1457 if ((!(hfi1_is_16B_mcast(packet->dlid))) && 1458 (packet->dlid != 1459 opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) { 1460 if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid) 1461 return -EINVAL; 1462 } 1463 1464 /* No multicast packets with SC15 */ 1465 if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF)) 1466 return -EINVAL; 1467 1468 /* Packets with permissive DLID always on SC15 */ 1469 if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 1470 16B)) && 1471 (packet->sc != 0xF)) 1472 return -EINVAL; 1473 1474 return 0; 1475 } 1476 1477 static int hfi1_setup_9B_packet(struct hfi1_packet *packet) 1478 { 1479 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd); 1480 struct ib_header *hdr; 1481 u8 lnh; 1482 1483 hfi1_setup_ib_header(packet); 1484 hdr = packet->hdr; 1485 1486 lnh = ib_get_lnh(hdr); 1487 if (lnh == HFI1_LRH_BTH) { 1488 packet->ohdr = &hdr->u.oth; 1489 packet->grh = NULL; 1490 } else if (lnh == HFI1_LRH_GRH) { 1491 u32 vtf; 1492 1493 packet->ohdr = &hdr->u.l.oth; 1494 packet->grh = &hdr->u.l.grh; 1495 if (packet->grh->next_hdr != IB_GRH_NEXT_HDR) 1496 goto drop; 1497 vtf = be32_to_cpu(packet->grh->version_tclass_flow); 1498 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) 1499 goto drop; 1500 } else { 1501 goto drop; 1502 } 1503 1504 /* Query commonly used fields from packet header */ 1505 packet->payload = packet->ebuf; 1506 packet->opcode = ib_bth_get_opcode(packet->ohdr); 1507 packet->slid = ib_get_slid(hdr); 1508 packet->dlid = ib_get_dlid(hdr); 1509 if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && 1510 (packet->dlid != 
		      be16_to_cpu(IB_LID_PERMISSIVE))))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			be16_to_cpu(IB_MULTICAST_LID_BASE);
	packet->sl = ib_get_sl(hdr);
	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	packet->pad = ib_bth_get_pad(packet->ohdr);
	packet->extra_byte = 0;
	packet->pkey = ib_bth_get_pkey(packet->ohdr);
	packet->migrated = ib_bth_is_migration(packet->ohdr);

	return 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{
	/*
	 * Bypass packets have a different header/payload split
	 * compared to an IB packet.
	 * Current split is set such that 16 bytes of the actual
	 * header are in the header buffer and the remainder is in
	 * the eager buffer. We chose 16 since hfi1 driver only
	 * supports 16B bypass packets and we will be able to
	 * receive the entire LRH with such a split.
	 */

	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u8 l4;

	packet->hdr = (struct hfi1_16b_header *)
			hfi1_get_16B_header(packet->rcd,
					    packet->rhf_addr);
	l4 = hfi1_16B_get_l4(packet->hdr);
	if (l4 == OPA_16B_L4_IB_LOCAL) {
		packet->ohdr = packet->ebuf;
		packet->grh = NULL;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES);
		packet->migrated = opa_bth_is_migration(packet->ohdr);
	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
		u32 vtf;
		u8 grh_len = sizeof(struct ib_grh);

		packet->ohdr = packet->ebuf + grh_len;
		packet->grh = packet->ebuf;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
		packet->migrated = opa_bth_is_migration(packet->ohdr);

		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else if (l4 == OPA_16B_L4_FM) {
		packet->mgmt = packet->ebuf;
		packet->ohdr = NULL;
		packet->grh = NULL;
		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
		packet->pad = OPA_16B_L4_FM_PAD;
		packet->hlen = OPA_16B_L4_FM_HLEN;
		packet->migrated = false;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
	packet->slid = hfi1_16B_get_slid(packet->hdr);
	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
				opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
					    16B);
	packet->sc = hfi1_16B_get_sc(packet->hdr);
	packet->sl = ibp->sc_to_sl[packet->sc];
	packet->extra_byte = SIZE_OF_LT;
	packet->pkey = hfi1_16B_get_pkey(packet->hdr);

	if (hfi1_bypass_ingress_pkt_check(packet))
		goto drop;

	return 0;
drop:
	hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

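/*
 * show_eflags_errs - log a packet's RHF error flags
 *
 * Decodes the individual RHF error bits (k_hdr_len, dc_unc, dc, tid, len,
 * ecc, icrc) and the receive-type-error code into a single dd_dev_err()
 * line for the packet's receive context.
 */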
static void show_eflags_errs(struct hfi1_packet *packet) 1610 { 1611 struct hfi1_ctxtdata *rcd = packet->rcd; 1612 u32 rte = rhf_rcv_type_err(packet->rhf); 1613 1614 dd_dev_err(rcd->dd, 1615 "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n", 1616 rcd->ctxt, packet->rhf, 1617 packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "", 1618 packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "", 1619 packet->rhf & RHF_DC_ERR ? "dc " : "", 1620 packet->rhf & RHF_TID_ERR ? "tid " : "", 1621 packet->rhf & RHF_LEN_ERR ? "len " : "", 1622 packet->rhf & RHF_ECC_ERR ? "ecc " : "", 1623 packet->rhf & RHF_ICRC_ERR ? "icrc " : "", 1624 rte); 1625 } 1626 1627 void handle_eflags(struct hfi1_packet *packet) 1628 { 1629 struct hfi1_ctxtdata *rcd = packet->rcd; 1630 1631 rcv_hdrerr(rcd, rcd->ppd, packet); 1632 if (rhf_err_flags(packet->rhf)) 1633 show_eflags_errs(packet); 1634 } 1635 1636 static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet) 1637 { 1638 struct hfi1_ibport *ibp; 1639 struct net_device *netdev; 1640 struct hfi1_ctxtdata *rcd = packet->rcd; 1641 struct napi_struct *napi = rcd->napi; 1642 struct sk_buff *skb; 1643 struct hfi1_netdev_rxq *rxq = container_of(napi, 1644 struct hfi1_netdev_rxq, napi); 1645 u32 extra_bytes; 1646 u32 tlen, qpnum; 1647 bool do_work, do_cnp; 1648 1649 trace_hfi1_rcvhdr(packet); 1650 1651 hfi1_setup_ib_header(packet); 1652 1653 packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth; 1654 packet->grh = NULL; 1655 1656 if (unlikely(rhf_err_flags(packet->rhf))) { 1657 handle_eflags(packet); 1658 return; 1659 } 1660 1661 qpnum = ib_bth_get_qpn(packet->ohdr); 1662 netdev = hfi1_netdev_get_data(rcd->dd, qpnum); 1663 if (!netdev) 1664 goto drop_no_nd; 1665 1666 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); 1667 trace_ctxt_rsm_hist(rcd->ctxt); 1668 1669 /* handle congestion notifications */ 1670 do_work = hfi1_may_ecn(packet); 1671 if (unlikely(do_work)) { 1672 do_cnp = (packet->opcode != IB_OPCODE_CNP); 1673 (void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp, 1674 packet, do_cnp); 1675 } 1676 1677 /* 1678 * We have split point after last byte of DETH 1679 * lets strip padding and CRC and ICRC. 1680 * tlen is whole packet len so we need to 1681 * subtract header size as well. 1682 */ 1683 tlen = packet->tlen; 1684 extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) + 1685 packet->hlen; 1686 if (unlikely(tlen < extra_bytes)) 1687 goto drop; 1688 1689 tlen -= extra_bytes; 1690 1691 skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf); 1692 if (unlikely(!skb)) 1693 goto drop; 1694 1695 dev_sw_netstats_rx_add(netdev, skb->len); 1696 1697 skb->dev = netdev; 1698 skb->pkt_type = PACKET_HOST; 1699 netif_receive_skb(skb); 1700 1701 return; 1702 1703 drop: 1704 ++netdev->stats.rx_dropped; 1705 drop_no_nd: 1706 ibp = rcd_to_iport(packet->rcd); 1707 ++ibp->rvp.n_pkt_drops; 1708 } 1709 1710 /* 1711 * The following functions are called by the interrupt handler. They are type 1712 * specific handlers for each packet type. 
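 * They are installed in the normal_rhf_rcv_functions and
 * netdev_rhf_rcv_functions tables at the end of this file and are indexed
 * by the receive type reported in the RHF (rhf_rcv_type()).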
1713 */ 1714 static void process_receive_ib(struct hfi1_packet *packet) 1715 { 1716 if (hfi1_setup_9B_packet(packet)) 1717 return; 1718 1719 if (unlikely(hfi1_dbg_should_fault_rx(packet))) 1720 return; 1721 1722 trace_hfi1_rcvhdr(packet); 1723 1724 if (unlikely(rhf_err_flags(packet->rhf))) { 1725 handle_eflags(packet); 1726 return; 1727 } 1728 1729 hfi1_ib_rcv(packet); 1730 } 1731 1732 static void process_receive_bypass(struct hfi1_packet *packet) 1733 { 1734 struct hfi1_devdata *dd = packet->rcd->dd; 1735 1736 if (hfi1_setup_bypass_packet(packet)) 1737 return; 1738 1739 trace_hfi1_rcvhdr(packet); 1740 1741 if (unlikely(rhf_err_flags(packet->rhf))) { 1742 handle_eflags(packet); 1743 return; 1744 } 1745 1746 if (hfi1_16B_get_l2(packet->hdr) == 0x2) { 1747 hfi1_16B_rcv(packet); 1748 } else { 1749 dd_dev_err(dd, 1750 "Bypass packets other than 16B are not supported in normal operation. Dropping\n"); 1751 incr_cntr64(&dd->sw_rcv_bypass_packet_errors); 1752 if (!(dd->err_info_rcvport.status_and_code & 1753 OPA_EI_STATUS_SMASK)) { 1754 u64 *flits = packet->ebuf; 1755 1756 if (flits && !(packet->rhf & RHF_LEN_ERR)) { 1757 dd->err_info_rcvport.packet_flit1 = flits[0]; 1758 dd->err_info_rcvport.packet_flit2 = 1759 packet->tlen > sizeof(flits[0]) ? 1760 flits[1] : 0; 1761 } 1762 dd->err_info_rcvport.status_and_code |= 1763 (OPA_EI_STATUS_SMASK | BAD_L2_ERR); 1764 } 1765 } 1766 } 1767 1768 static void process_receive_error(struct hfi1_packet *packet) 1769 { 1770 /* KHdrHCRCErr -- KDETH packet with a bad HCRC */ 1771 if (unlikely( 1772 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) && 1773 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR || 1774 packet->rhf & RHF_DC_ERR))) 1775 return; 1776 1777 hfi1_setup_ib_header(packet); 1778 handle_eflags(packet); 1779 1780 if (unlikely(rhf_err_flags(packet->rhf))) 1781 dd_dev_err(packet->rcd->dd, 1782 "Unhandled error packet received. Dropping.\n"); 1783 } 1784 1785 static void kdeth_process_expected(struct hfi1_packet *packet) 1786 { 1787 hfi1_setup_9B_packet(packet); 1788 if (unlikely(hfi1_dbg_should_fault_rx(packet))) 1789 return; 1790 1791 if (unlikely(rhf_err_flags(packet->rhf))) { 1792 struct hfi1_ctxtdata *rcd = packet->rcd; 1793 1794 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) 1795 return; 1796 } 1797 1798 hfi1_kdeth_expected_rcv(packet); 1799 } 1800 1801 static void kdeth_process_eager(struct hfi1_packet *packet) 1802 { 1803 hfi1_setup_9B_packet(packet); 1804 if (unlikely(hfi1_dbg_should_fault_rx(packet))) 1805 return; 1806 1807 trace_hfi1_rcvhdr(packet); 1808 if (unlikely(rhf_err_flags(packet->rhf))) { 1809 struct hfi1_ctxtdata *rcd = packet->rcd; 1810 1811 show_eflags_errs(packet); 1812 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet)) 1813 return; 1814 } 1815 1816 hfi1_kdeth_eager_rcv(packet); 1817 } 1818 1819 static void process_receive_invalid(struct hfi1_packet *packet) 1820 { 1821 dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n", 1822 rhf_rcv_type(packet->rhf)); 1823 } 1824 1825 #define HFI1_RCVHDR_DUMP_MAX 5 1826 1827 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd) 1828 { 1829 struct hfi1_packet packet; 1830 struct ps_mdata mdata; 1831 int i; 1832 1833 seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n", 1834 rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd), 1835 get_dma_rtail_setting(rcd) ? 
1836 "dma_rtail" : "nodma_rtail", 1837 read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL), 1838 read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS), 1839 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & 1840 RCV_HDR_HEAD_HEAD_MASK, 1841 read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL), 1842 rcd->head); 1843 1844 init_packet(rcd, &packet); 1845 init_ps_mdata(&mdata, &packet); 1846 1847 for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) { 1848 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + 1849 rcd->rhf_offset; 1850 struct ib_header *hdr; 1851 u64 rhf = rhf_to_cpu(rhf_addr); 1852 u32 etype = rhf_rcv_type(rhf), qpn; 1853 u8 opcode; 1854 u32 psn; 1855 u8 lnh; 1856 1857 if (ps_done(&mdata, rhf, rcd)) 1858 break; 1859 1860 if (ps_skip(&mdata, rhf, rcd)) 1861 goto next; 1862 1863 if (etype > RHF_RCV_TYPE_IB) 1864 goto next; 1865 1866 packet.hdr = hfi1_get_msgheader(rcd, rhf_addr); 1867 hdr = packet.hdr; 1868 1869 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 1870 1871 if (lnh == HFI1_LRH_BTH) 1872 packet.ohdr = &hdr->u.oth; 1873 else if (lnh == HFI1_LRH_GRH) 1874 packet.ohdr = &hdr->u.l.oth; 1875 else 1876 goto next; /* just in case */ 1877 1878 opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24); 1879 qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK; 1880 psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2])); 1881 1882 seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n", 1883 mdata.ps_head, opcode, qpn, psn); 1884 next: 1885 update_ps_mdata(&mdata, rcd); 1886 } 1887 } 1888 1889 const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = { 1890 [RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected, 1891 [RHF_RCV_TYPE_EAGER] = kdeth_process_eager, 1892 [RHF_RCV_TYPE_IB] = process_receive_ib, 1893 [RHF_RCV_TYPE_ERROR] = process_receive_error, 1894 [RHF_RCV_TYPE_BYPASS] = process_receive_bypass, 1895 [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, 1896 [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, 1897 [RHF_RCV_TYPE_INVALID7] = process_receive_invalid, 1898 }; 1899 1900 const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = { 1901 [RHF_RCV_TYPE_EXPECTED] = process_receive_invalid, 1902 [RHF_RCV_TYPE_EAGER] = process_receive_invalid, 1903 [RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv, 1904 [RHF_RCV_TYPE_ERROR] = process_receive_error, 1905 [RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv, 1906 [RHF_RCV_TYPE_INVALID5] = process_receive_invalid, 1907 [RHF_RCV_TYPE_INVALID6] = process_receive_invalid, 1908 [RHF_RCV_TYPE_INVALID7] = process_receive_invalid, 1909 }; 1910
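
/*
 * The netdev table differs from the normal table in that IB packets are
 * handed to the IPoIB receive path, bypass packets go to the VNIC receive
 * path, and the expected/eager KDETH types are treated as invalid.
 */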