/*
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

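/*
 * Module parameter setter for cap_mask (writable at runtime, e.g. via
 * /sys/module/hfi1/parameters/cap_mask): parse the new mask, drop bits
 * that may not change once the capability set is locked, clear reserved
 * bits, and enforce that user bits never exceed their kernel
 * counterparts.
 */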
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

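/*
 * Module parameter getter for cap_mask: report the current mask with
 * the locked bit hidden and the K2U bits mirrored into the user half.
 */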
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}

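/* Walk from the rvt_dev_info back to the owning hfi1_devdata's PCI device */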
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata, verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return nunits_active;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (void *)(rhf_addr - rcd->rhf_offset + offset);
}

static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{
	return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}

static inline struct hfi1_16b_header
		*hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
				     __le32 *rhf_addr)
{
	return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding (ilog2(size / PAGE_SIZE) + 1; e.g.,
 * a buffer of 2 * PAGE_SIZE encodes as 2).
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!PAGE_ALIGNED(size)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}

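/*
 * Handle a packet that arrived with receive header error flags set:
 * suppress injected DC errors, drop ICRC errors, preemptively NAK RC
 * QPs on TID errors, and recover CNPs that were flagged with an opcode
 * error on pre-B0 hardware.
 */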
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct ib_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	u32 mlid_base;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
	struct rvt_dev_info *rdi = &verbs_dev->rdi;

	if ((packet->rhf & RHF_DC_ERR) &&
	    hfi1_dbg_fault_suppress_err(verbs_dev))
		return;

	if (packet->rhf & RHF_ICRC_ERR)
		return;

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		goto drop;
	} else {
		u8 lnh = ib_get_lnh(rhdr);

		mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &rhdr->u.oth;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &rhdr->u.l.oth;
			packet->grh = &rhdr->u.l.grh;
		} else {
			goto drop;
		}
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u32 dlid = ib_get_dlid(rhdr);
		u32 qp_num;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (packet->grh) {
			u32 vtf;
			struct ib_grh *grh = packet->grh;

			if (grh->next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(grh->version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		}

		/* Get the destination QP number. */
		qp_num = ib_bth_get_qpn(packet->ohdr);
		if (dlid < mlid_base) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

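/* Initialize per-interrupt packet state from the receive context */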
static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, the packet
 * has already been evaluated as to whether processing of those bits
 * should be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
 */
bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_other_headers *ohdr = pkt->ohdr;
	struct ib_grh *grh = pkt->grh;
	u32 rqpn = 0;
	u16 pkey;
	u32 rlid, slid, dlid = 0;
	u8 hdr_type, sc, svc_type, opcode;
	bool is_mcast = false, ignore_fecn = false, do_cnp = false,
		fecn, becn;

	/* can be called from prescan */
	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(pkt->hdr);
		sc = hfi1_16B_get_sc(pkt->hdr);
		dlid = hfi1_16B_get_dlid(pkt->hdr);
		slid = hfi1_16B_get_slid(pkt->hdr);
		is_mcast = hfi1_is_16B_mcast(dlid);
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_16B;
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		pkey = ib_bth_get_pkey(ohdr);
		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
		dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
			ppd->lid;
		slid = ib_get_slid(pkt->hdr);
		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
			   (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_9B;
		fecn = ib_bth_get_fecn(ohdr);
		becn = ib_bth_get_becn(ohdr);
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return false;
	}

	ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
		(opcode == IB_OPCODE_RC_ACKNOWLEDGE);
	/*
	 * ACKNOWLEDGE packets do not get a CNP but this will be
	 * guarded by ignore_fecn above.
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}

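/* Receive queue position/sequence snapshot used while prescanning the rxq */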
struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}

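/* Return non-zero once the prescan has consumed all available entries */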
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
	do { \
		if (rcd->ppd->cc_prescan) \
			__prescan_rxq(packet); \
	} while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_ibport *ibp = rcd_to_iport(rcd);
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
					 packet->rcd->rhf_offset;
		struct rvt_qp *qp;
		struct ib_header *hdr;
		struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
		hdr = packet->hdr;
		lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &hdr->u.oth;
			packet->grh = NULL;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &hdr->u.l.oth;
			packet->grh = &hdr->u.l.grh;
		} else {
			goto next; /* just in case */
		}

		if (!hfi1_may_ecn(packet))
			goto next;

		bth1 = be32_to_cpu(packet->ohdr->bth[1]);
		qpn = bth1 & RVT_QPN_MASK;
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

		if (!qp) {
			rcu_read_unlock();
			goto next;
		}

		hfi1_process_ecn_slowpath(qp, packet, true);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
		packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

static void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct rvt_qp *qp, *nqp;
	struct hfi1_ctxtdata *rcd = packet->rcd;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			packet->qp = qp;
			hfi1_send_rc_ack(packet, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}
}

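/*
 * The packet budget was hit: in thread context, flush the qp_wait_list
 * periodically and yield the CPU; in interrupt context, account the
 * event and tell the caller to stop processing.
 */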
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
	if (thread) {
		if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
			/* allow deferred processing */
			process_rcv_qp_work(packet);
		cond_resched();
		return RCV_PKT_OK;
	} else {
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
		return RCV_PKT_LIMIT;
	}
}

static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
		ret = max_packet_exceeded(packet, thread);
	return ret;
}

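/* Drop the current (invalid) entry and advance to the next rcvhdrq entry */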
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->rcd->dd->ctx0_seq_drop++;
	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				     packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

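/*
 * Process one rcvhdrq entry: locate the eager buffer (if any), dispatch
 * to the type-specific handler, and advance to the next entry.
 */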
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
				 &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((get_hdrqentsize(packet->rcd) -
					       (rhf_hdrq_offset(packet->rhf)
						+ 2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
				      packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->grh = NULL;
}

static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt
	 */
	update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}

/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

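/*
 * Handle receive interrupts when using the dma rtail option.
 */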
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u16 i;

	/*
	 * For dynamically allocated kernel contexts (like vnic) switch
	 * interrupt handler only for that context. Otherwise, switch
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
		hfi1_rcd_get(rcd);
		hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
			hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
	}
}

void set_all_slowpath(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			rcd->do_interrupt = rcd->slow_handler;

		hfi1_rcd_put(rcd);
	}
}

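/*
 * A non-SC15 packet arriving while the host link state is still ARMED
 * means the hardware has auto-transitioned the link to ACTIVE; queue
 * linkstate_active_work so the software state catches up. Returns true
 * when packet processing should bail until the transition completes.
 */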
static bool __set_armed_to_active(struct hfi1_packet *packet)
{
	u8 etype = rhf_rcv_type(packet->rhf);
	u8 sc = SC15_PACKET;

	if (etype == RHF_RCV_TYPE_IB) {
		struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
							   packet->rhf_addr);
		sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	} else if (etype == RHF_RCV_TYPE_BYPASS) {
		struct hfi1_16b_header *hdr = hfi1_get_16B_header(
						packet->rcd,
						packet->rhf_addr);
		sc = hfi1_16B_get_sc(hdr);
	}
	if (sc != SC15_PACKET) {
		int hwstate = driver_lstate(packet->rcd->ppd);
		struct work_struct *lsaw =
				&packet->rcd->ppd->linkstate_active_work;

		if (hwstate != IB_PORT_ACTIVE) {
			dd_dev_info(packet->rcd->dd,
				    "Unexpected link state %s\n",
				    opa_lstate_name(hwstate));
			return false;
		}

		queue_work(packet->rcd->ppd->link_wq, lsaw);
		return true;
	}
	return false;
}

/**
 * set_armed_to_active - the fast path for armed to active
 * @packet: the packet structure
 *
 * Return true if packet processing needs to bail.
 */
static bool set_armed_to_active(struct hfi1_packet *packet)
{
	if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
		return false;
	return __set_armed_to_active(packet);
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 * @thread: non-zero when called from thread context (may reschedule)
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!get_dma_rtail_setting(rcd)) {
		if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf. Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
				skip_pkt = 1;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!get_dma_rtail_setting(rcd)) {
			if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
				last = RCV_PKT_DONE;
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf. Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				bool lseq;

				lseq = hfi1_seq_incr(rcd,
						     rhf_rcv_seq(packet.rhf));
				if (!last && lseq)
					skip_pkt = 1;
			}
		}

		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}
		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}

/*
 * We may discover in the interrupt that the hardware link state has
 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
 * and we need to update the driver's notion of the link state.  We cannot
 * run set_link_state from interrupt context, so we queue this function on
 * a workqueue.
 *
 * We delay the regular interrupt processing until after the state changes
 * so that the link will be in the correct state by the time any application
 * we wake up attempts to send a reply to any message it received.
 * (Subsequent receive interrupts may possibly force the wakeup before we
 * update the link state.)
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return default_if_bad if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}

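/*
 * Convert an on-wire MAD packet enumeration back to an MTU size in
 * bytes. Returns 0xffff for an unrecognized enumeration.
 */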
u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT ||
	    ppd->host_link_state == HLS_UP_ARMED ||
	    ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

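/* Record the port's assigned LID and LMC and push them to the hardware */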
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

	return 0;
}

void shutdown_led_override(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	if (atomic_read(&ppd->led_override_timer_active)) {
		del_timer_sync(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 0);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}

	/* Hand control of the LED to the DC for normal operation */
	write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}

static void run_led_override(struct timer_list *t)
{
	struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
	struct hfi1_devdata *dd = ppd->dd;
	unsigned long timeout;
	int phase_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	phase_idx = ppd->led_override_phase & 1;

	setextled(dd, phase_idx);

	timeout = ppd->led_override_vals[phase_idx];

	/* Set up for next phase */
	ppd->led_override_phase = !ppd->led_override_phase;

	mod_timer(&ppd->led_override_timer, jiffies + timeout);
}

/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff)
{
	if (!(ppd->dd->flags & HFI1_INITTED))
		return;

	/* Convert to jiffies for direct use in timer */
	ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
	ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

	/* Arbitrarily start from LED on phase */
	ppd->led_override_phase = 1;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the handler will be called soon to look at our request.
	 */
	if (!timer_pending(&ppd->led_override_timer)) {
		timer_setup(&ppd->led_override_timer, run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 1);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}
}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int hfi1_reset_device(int unit)
{
	int ret;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	/* If there are any user/vnic contexts, we cannot reset */
	mutex_lock(&hfi1_mutex);
	if (dd->rcd)
		if (hfi1_stats.sps_ctxts) {
			mutex_unlock(&hfi1_mutex);
			ret = -EBUSY;
			goto bail;
		}
	mutex_unlock(&hfi1_mutex);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		shutdown_led_override(ppd);
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			   "Reinitialize unit %u after reset failed with %d\n",
			   unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}

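/* Point packet->hdr at the 9B header in the rcvhdrq and compute hlen */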
static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
	packet->hdr = (struct hfi1_ib_message_header *)
			hfi1_get_msgheader(packet->rcd,
					   packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}

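/* Sanity check SLID/DLID/SC combinations on an ingress 16B packet */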
static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
{
	struct hfi1_pportdata *ppd = packet->rcd->ppd;

	/* slid and dlid cannot be 0 */
	if ((!packet->slid) || (!packet->dlid))
		return -EINVAL;

	/* Compare port lid with incoming packet dlid */
	if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
	    (packet->dlid !=
		opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
		if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
			return -EINVAL;
	}

	/* No multicast packets with SC15 */
	if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
		return -EINVAL;

	/* Packets with permissive DLID always on SC15 */
	if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
					 16B)) &&
	    (packet->sc != 0xF))
		return -EINVAL;

	return 0;
}

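/*
 * Parse a 9B (IB) packet: locate the OTH and optional GRH, validate the
 * GRH, and cache the commonly used header fields in the packet struct.
 */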
static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct ib_header *hdr;
	u8 lnh;

	hfi1_setup_ib_header(packet);
	hdr = packet->hdr;

	lnh = ib_get_lnh(hdr);
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
		packet->grh = NULL;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		packet->grh = &hdr->u.l.grh;
		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf;
	packet->opcode = ib_bth_get_opcode(packet->ohdr);
	packet->slid = ib_get_slid(hdr);
	packet->dlid = ib_get_dlid(hdr);
	if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
				be16_to_cpu(IB_MULTICAST_LID_BASE);
	packet->sl = ib_get_sl(hdr);
	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	packet->pad = ib_bth_get_pad(packet->ohdr);
	packet->extra_byte = 0;
	packet->pkey = ib_bth_get_pkey(packet->ohdr);
	packet->migrated = ib_bth_is_migration(packet->ohdr);

	return 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{
	/*
	 * Bypass packets have a different header/payload split
	 * compared to an IB packet.
	 * Current split is set such that 16 bytes of the actual
	 * header is in the header buffer and the remaining is in
	 * the eager buffer. We chose 16 since hfi1 driver only
	 * supports 16B bypass packets and we will be able to
	 * receive the entire LRH with such a split.
	 */

	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u8 l4;

	packet->hdr = (struct hfi1_16b_header *)
			hfi1_get_16B_header(packet->rcd,
					    packet->rhf_addr);
	l4 = hfi1_16B_get_l4(packet->hdr);
	if (l4 == OPA_16B_L4_IB_LOCAL) {
		packet->ohdr = packet->ebuf;
		packet->grh = NULL;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES);
		packet->migrated = opa_bth_is_migration(packet->ohdr);
	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
		u32 vtf;
		u8 grh_len = sizeof(struct ib_grh);

		packet->ohdr = packet->ebuf + grh_len;
		packet->grh = packet->ebuf;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
		packet->migrated = opa_bth_is_migration(packet->ohdr);

		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else if (l4 == OPA_16B_L4_FM) {
		packet->mgmt = packet->ebuf;
		packet->ohdr = NULL;
		packet->grh = NULL;
		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
		packet->pad = OPA_16B_L4_FM_PAD;
		packet->hlen = OPA_16B_L4_FM_HLEN;
		packet->migrated = false;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
	packet->slid = hfi1_16B_get_slid(packet->hdr);
	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
				opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
					    16B);
	packet->sc = hfi1_16B_get_sc(packet->hdr);
	packet->sl = ibp->sc_to_sl[packet->sc];
	packet->extra_byte = SIZE_OF_LT;
	packet->pkey = hfi1_16B_get_pkey(packet->hdr);

	if (hfi1_bypass_ingress_pkt_check(packet))
		goto drop;

	return 0;
drop:
	hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

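/* Log the decoded RHF error flags for this receive context */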
static void show_eflags_errs(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	dd_dev_err(rcd->dd,
		   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
		   rcd->ctxt, packet->rhf,
		   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
		   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
		   packet->rhf & RHF_DC_ERR ? "dc " : "",
		   packet->rhf & RHF_TID_ERR ? "tid " : "",
		   packet->rhf & RHF_LEN_ERR ? "len " : "",
		   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
		   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
		   rte);
}

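/* Handle RHF error flags: run header error processing, then log them */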
void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	rcv_hdrerr(rcd, rcd->ppd, packet);
	if (rhf_err_flags(packet->rhf))
		show_eflags_errs(packet);
}

/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
static void process_receive_ib(struct hfi1_packet *packet)
{
	if (hfi1_setup_9B_packet(packet))
		return;

	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	hfi1_ib_rcv(packet);
}

static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
{
	/* Packet received in VNIC context via RSM */
	if (packet->rcd->is_vnic)
		return true;

	if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
	    (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
		return true;

	return false;
}

static void process_receive_bypass(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;

	if (hfi1_is_vnic_packet(packet)) {
		hfi1_vnic_bypass_rcv(packet);
		return;
	}

	if (hfi1_setup_bypass_packet(packet))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
		hfi1_16B_rcv(packet);
	} else {
		dd_dev_err(dd,
			   "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
		incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			u64 *flits = packet->ebuf;

			if (flits && !(packet->rhf & RHF_LEN_ERR)) {
				dd->err_info_rcvport.packet_flit1 = flits[0];
				dd->err_info_rcvport.packet_flit2 =
					packet->tlen > sizeof(flits[0]) ?
					flits[1] : 0;
			}
			dd->err_info_rcvport.status_and_code |=
				(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
		}
	}
}

static void process_receive_error(struct hfi1_packet *packet)
{
	/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
	if (unlikely(
		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
		 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
		  packet->rhf & RHF_DC_ERR)))
		return;

	hfi1_setup_ib_header(packet);
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");
}

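/* Type-specific handler for KDETH packets that hit an expected TID entry */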
static void kdeth_process_expected(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_expected_rcv(packet);
}

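/* Type-specific handler for KDETH packets landing in eager buffers */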
static void kdeth_process_eager(struct hfi1_packet *packet)
{
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);
	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		show_eflags_errs(packet);
		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_eager_rcv(packet);
}

static void process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
}

#define HFI1_RCVHDR_DUMP_MAX	5

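/*
 * Dump receive context state and up to HFI1_RCVHDR_DUMP_MAX pending
 * rcvhdrq entries to a seq_file.
 */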
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_packet packet;
	struct ps_mdata mdata;
	int i;

	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu  sw head %u\n",
		   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
		   get_dma_rtail_setting(rcd) ?
		   "dma_rtail" : "nodma_rtail",
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
		   RCV_HDR_HEAD_HEAD_MASK,
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
		   rcd->head);

	init_packet(rcd, &packet);
	init_ps_mdata(&mdata, &packet);

	for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
					 rcd->rhf_offset;
		struct ib_header *hdr;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn;
		u8 opcode;
		u32 psn;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		if (etype > RHF_RCV_TYPE_IB)
			goto next;

		packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
		hdr = packet.hdr;

		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			packet.ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH)
			packet.ohdr = &hdr->u.l.oth;
		else
			goto next; /* just in case */

		opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
		qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
		psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));

		seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
			   mdata.ps_head, opcode, qpn, psn);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

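/* RHF receive-type dispatch table for the normal (slow path) handlers */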
const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
	[RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
	[RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
	[RHF_RCV_TYPE_IB] = process_receive_ib,
	[RHF_RCV_TYPE_ERROR] = process_receive_error,
	[RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};