#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>

#include "chip_registers.h"
#include "common.h"
#include "verbs.h"
#include "pio.h"
#include "chip.h"
#include "mad.h"
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"

/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U

/* don't care about this except printing */
#define HFI1_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define HFI1_OUI 0x001175
#define HFI1_OUI_LSB 40

#define DROP_PACKET_OFF		0
#define DROP_PACKET_ON		1

extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
	(((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
			HFI1_CAP_MISC_MASK)
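/*
 * Illustrative usage sketch (not part of the original source):
 * hfi1_cap_mask packs kernel capability bits in the low half and user
 * capability bits above HFI1_CAP_USER_SHIFT, so the same HFI1_CAP_*
 * flag name can test either view:
 *
 *	if (HFI1_CAP_IS_KSET(SDMA))
 *		... kernel contexts have SDMA enabled ...
 *	if (HFI1_CAP_IS_USET(EXTENDED_PSN))
 *		... user contexts get extended PSNs ...
 *
 * SDMA and EXTENDED_PSN are assumed here to be among the HFI1_CAP_*
 * flags defined in common.h.
 */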
/* Offline Disabled Reason is 4 bits */
#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)

/*
 * Control context is always 0 and handles the error packets.
 * It also handles the VL15 and multicast packets.
 */
#define HFI1_CTRL_CTXT    0

/*
 * Driver context will store software counters for each of the events
 * associated with these status registers
 */
#define NUM_CCE_ERR_STATUS_COUNTERS 41
#define NUM_RCV_ERR_STATUS_COUNTERS 64
#define NUM_MISC_ERR_STATUS_COUNTERS 13
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
#define NUM_SEND_ERR_STATUS_COUNTERS 3
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
/*
 * Per-driver stats: values that are neither device- nor port-specific,
 * or that are summed over all of the devices and ports.
 * They are described by name via the ipathfs filesystem, so layout
 * and number of elements can change without breaking compatibility.
 * If members are added or deleted, hfi1_statnames[] in debugfs.c must
 * be changed to match.
 */
struct hfi1_ib_stats {
	__u64 sps_ints; /* number of interrupts handled */
	__u64 sps_errints; /* number of error interrupts */
	__u64 sps_txerrs; /* tx-related packet errors */
	__u64 sps_rcverrs; /* non-crc rcv packet errors */
	__u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs; /* no pio bufs avail from kernel */
	__u64 sps_ctxts; /* number of contexts currently open */
	__u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
	__u64 sps_buffull;
	__u64 sps_hdrfull;
};

extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;

/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)

/*
 * Below contains all data related to a single context (formerly called port).
 */

#ifdef CONFIG_DEBUG_FS
struct hfi1_opcode_stats_perctx;
#endif

struct ctxt_eager_bufs {
	ssize_t size;            /* total size of eager buffers */
	u32 count;               /* size of buffers array */
	u32 numbufs;             /* number of buffers allocated */
	u32 alloced;             /* number of rcvarray entries used */
	u32 rcvtid_size;         /* size of each eager rcv tid */
	u32 threshold;           /* head update threshold */
	struct eager_buffer {
		void *addr;
		dma_addr_t dma;
		ssize_t len;
	} *buffers;
	struct {
		void *addr;
		dma_addr_t dma;
	} *rcvtids;
};

struct exp_tid_set {
	struct list_head list;
	u32 count;
};

struct hfi1_ctxtdata {
	/* shadow the ctxt's RcvCtrl register */
	u64 rcvctrl;
	/* rcvhdrq base, needs mmap before useful */
	void *rcvhdrq;
	/* kernel virtual address where hdrqtail is updated */
	volatile __le64 *rcvhdrtail_kvaddr;
	/* when waiting for rcv or pioavail */
	wait_queue_head_t wait;
	/* rcvhdrq size (for freeing) */
	size_t rcvhdrq_size;
	/* number of rcvhdrq entries */
	u16 rcvhdrq_cnt;
	/* size of each of the rcvhdrq entries */
	u16 rcvhdrqentsize;
	/* mmap of hdrq, must fit in 44 bits */
	dma_addr_t rcvhdrq_dma;
	dma_addr_t rcvhdrqtailaddr_dma;
	struct ctxt_eager_bufs egrbufs;
	/* this receive context's assigned PIO ACK send context */
	struct send_context *sc;

	/* dynamic receive available interrupt timeout */
	u32 rcvavail_timeout;
	/* Reference count the base context usage */
	struct kref kref;

	/* Device context index */
	u16 ctxt;
	/*
	 * non-zero if ctxt can be shared, and defines the maximum number of
	 * sub-contexts for this device context.
	 */
	u16 subctxt_cnt;
	/* non-zero if ctxt is being shared. */
	u16 subctxt_id;
	u8 uuid[16];
	/* job key */
	u16 jkey;
	/* number of RcvArray groups for this context. */
	u32 rcv_array_groups;
	/* index of first eager TID entry. */
	u32 eager_base;
	/* number of expected TID entries */
	u32 expected_count;
	/* index of first expected TID entry. */
	u32 expected_base;

	struct exp_tid_set tid_group_list;
	struct exp_tid_set tid_used_list;
	struct exp_tid_set tid_full_list;

	/* lock protecting all Expected TID data */
	struct mutex exp_lock;
	/* per-context configuration flags */
	unsigned long flags;
	/* per-context event flags for fileops/intr communication */
	unsigned long event_flags;
	/* total number of polled urgent packets */
	u32 urgent;
	/* saved total number of polled urgent packets for poll edge trigger */
	u32 urgent_poll;
	/* same size as task_struct .comm[], command that opened context */
	char comm[TASK_COMM_LEN];
	/* so file ops can get at unit */
	struct hfi1_devdata *dd;
	/* so functions that need physical port can get it easily */
	struct hfi1_pportdata *ppd;
	/* associated msix interrupt */
	u32 msix_intr;
	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
	void *subctxt_uregbase;
	/* An array of pages for the eager receive buffers * N */
	void *subctxt_rcvegrbuf;
	/* An array of pages for the eager header queue entries * N */
	void *subctxt_rcvhdr_base;
	/* Bitmask of in use context(s) */
	DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
	/* The version of the library which opened this ctxt */
	u32 userversion;
	/* Type of packets or conditions we want to poll for */
	u16 poll_type;
	/* receive packet sequence counter */
	u8 seq_cnt;
	/* ctxt rcvhdrq head offset */
	u32 head;
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
	/* interrupt handling */
	u64 imask;	/* clear interrupt mask */
	int ireg;	/* clear interrupt register */
	unsigned numa_id; /* numa node of this context */
	/* verbs stats per CTX */
	struct hfi1_opcode_stats_perctx *opstats;

	/* Is ASPM interrupt supported for this context */
	bool aspm_intr_supported;
	/* ASPM state (enabled/disabled) for this context */
	bool aspm_enabled;
	/* Timer for re-enabling ASPM if interrupt activity quietens down */
	struct timer_list aspm_timer;
	/* Lock to serialize between intr, timer intr and user threads */
	spinlock_t aspm_lock;
	/* Is ASPM processing enabled for this context (in intr context) */
	bool aspm_intr_enable;
	/* Last interrupt timestamp */
	ktime_t aspm_ts_last_intr;
	/* Last timestamp at which we scheduled a timer for this context */
	ktime_t aspm_ts_timer_sched;

	/*
	 * The interrupt handler for a particular receive context can vary
	 * throughout its lifetime. This member is not lock protected, so it
	 * must be updated atomically and the previous and new values must
	 * always be valid. Worst case is that we process an extra interrupt
	 * and up to 64 packets with the wrong interrupt handler.
	 */
	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);

	/* Indicates that this is a vnic context */
	bool is_vnic;

	/* vnic queue index this context is mapped to */
	u8 vnic_q_idx;
};

/*
 * Represents a single packet at a high level. Put commonly computed things in
 * here so we do not have to keep doing them over and over. The rule of thumb is
 * if something is used one time to derive some value, store that something in
 * here. If it is used multiple times, then store the result of that derivation
 * in here.
 */
struct hfi1_packet {
	void *ebuf;
	void *hdr;
	void *payload;
	struct hfi1_ctxtdata *rcd;
	__le32 *rhf_addr;
	struct rvt_qp *qp;
	struct ib_other_headers *ohdr;
	struct ib_grh *grh;
	u64 rhf;
	u32 maxcnt;
	u32 rhqoff;
	u32 dlid;
	u32 slid;
	u16 tlen;
	s16 etail;
	u8 hlen;
	u8 numpkt;
	u8 rsize;
	u8 updegr;
	u8 etype;
	u8 extra_byte;
	u8 pad;
	u8 sc;
	u8 sl;
	u8 opcode;
	bool becn;
	bool fecn;
};

/* Packet types */
#define HFI1_PKT_TYPE_9B  0
#define HFI1_PKT_TYPE_16B 1

/*
 * OPA 16B Header
 */
#define OPA_16B_L4_MASK		0xFFull
#define OPA_16B_SC_MASK		0x1F00000ull
#define OPA_16B_SC_SHIFT	20
#define OPA_16B_LID_MASK	0xFFFFFull
#define OPA_16B_DLID_MASK	0xF000ull
#define OPA_16B_DLID_SHIFT	20
#define OPA_16B_DLID_HIGH_SHIFT	12
#define OPA_16B_SLID_MASK	0xF00ull
#define OPA_16B_SLID_SHIFT	20
#define OPA_16B_SLID_HIGH_SHIFT	8
#define OPA_16B_BECN_MASK       0x80000000ull
#define OPA_16B_BECN_SHIFT      31
#define OPA_16B_FECN_MASK       0x10000000ull
#define OPA_16B_FECN_SHIFT      28
#define OPA_16B_L2_MASK		0x60000000ull
#define OPA_16B_L2_SHIFT	29
#define OPA_16B_PKEY_MASK	0xFFFF0000ull
#define OPA_16B_PKEY_SHIFT	16
#define OPA_16B_LEN_MASK	0x7FF00000ull
#define OPA_16B_LEN_SHIFT	20
#define OPA_16B_RC_MASK		0xE000000ull
#define OPA_16B_RC_SHIFT	25
#define OPA_16B_AGE_MASK	0xFF0000ull
#define OPA_16B_AGE_SHIFT	16
#define OPA_16B_ENTROPY_MASK	0xFFFFull
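
/*
 * Layout sketch (illustrative, not part of the original source): 16B
 * LIDs are 24 bits wide and split across two header dwords.  The low
 * 20 bits sit in lrh[1] (DLID) or lrh[0] (SLID) under OPA_16B_LID_MASK;
 * the high 4 bits sit in lrh[2] (bits 15:12 for the DLID, bits 11:8 for
 * the SLID).  E.g. for DLID 0xA12345, hfi1_16B_get_dlid() below
 * recombines:
 *
 *	(lrh[1] & 0xFFFFF)                ->  0x12345
 *	((lrh[2] & 0xF000) >> 12) << 20   -> 0xA00000
 *	OR'd together                     -> 0xA12345
 */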

/*
 * OPA 16B L2/L4 Encodings
 */
#define OPA_16B_L2_TYPE		0x02
#define OPA_16B_L4_IB_LOCAL	0x09
#define OPA_16B_L4_IB_GLOBAL	0x0A
#define OPA_16B_L4_ETHR		OPA_VNIC_L4_ETHR

static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
	return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
}

static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
}

static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
		     OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
}

static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
		     OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
}

static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
}

static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
}

static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
}

static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
}

static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
}

static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
}

static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
{
	return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
}

static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
{
	return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
}

#define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))

/*
 * BTH
 */
#define OPA_16B_BTH_PAD_MASK	7
static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
		   OPA_16B_BTH_PAD_MASK);
}

struct rvt_sge_state;

/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define HFI1_IB_CFG_SPD 5 /* current Link spd */
#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */

/*
 * HFI or Host Link States
 *
 * These describe the states the driver thinks the logical and physical
 * states are in.  Used as an argument to set_link_state().  Implemented
 * as bits for easy multi-state checking.  The actual state can only be
 * one.
 */
#define __HLS_UP_INIT_BP	0
#define __HLS_UP_ARMED_BP	1
#define __HLS_UP_ACTIVE_BP	2
#define __HLS_DN_DOWNDEF_BP	3	/* link down default */
#define __HLS_DN_POLL_BP	4
#define __HLS_DN_DISABLE_BP	5
#define __HLS_DN_OFFLINE_BP	6
#define __HLS_VERIFY_CAP_BP	7
#define __HLS_GOING_UP_BP	8
#define __HLS_GOING_OFFLINE_BP  9
#define __HLS_LINK_COOLDOWN_BP 10

#define HLS_UP_INIT	  BIT(__HLS_UP_INIT_BP)
#define HLS_UP_ARMED	  BIT(__HLS_UP_ARMED_BP)
#define HLS_UP_ACTIVE	  BIT(__HLS_UP_ACTIVE_BP)
#define HLS_DN_DOWNDEF	  BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
#define HLS_DN_POLL	  BIT(__HLS_DN_POLL_BP)
#define HLS_DN_DISABLE	  BIT(__HLS_DN_DISABLE_BP)
#define HLS_DN_OFFLINE	  BIT(__HLS_DN_OFFLINE_BP)
#define HLS_VERIFY_CAP	  BIT(__HLS_VERIFY_CAP_BP)
#define HLS_GOING_UP	  BIT(__HLS_GOING_UP_BP)
#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)

#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)
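
/*
 * Usage sketch (illustrative, not part of the original source): because
 * each state is a distinct bit, several states can be tested in one
 * comparison even though only one state can be current at a time:
 *
 *	if (ppd->host_link_state & HLS_UP)
 *		... link is INIT, ARMED or ACTIVE ...
 *	if (ppd->host_link_state & (HLS_DN_POLL | HLS_VERIFY_CAP))
 *		... link is in either of two specific transition states ...
 */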

/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff

/*
 * Possible fabric manager config parameters for fm_{get,set}_table()
 */
#define FM_TBL_VL_HIGH_ARB		1 /* Get/set VL high prio weights */
#define FM_TBL_VL_LOW_ARB		2 /* Get/set VL low prio weights */
#define FM_TBL_BUFFER_CONTROL		3 /* Get/set Buffer Control */
#define FM_TBL_SC2VLNT			4 /* Get/set SC->VLnt */
#define FM_TBL_VL_PREEMPT_ELEMS		5 /* Get (no set) VL preempt elems */
#define FM_TBL_VL_PREEMPT_MATRIX	6 /* Get (no set) VL preempt matrix */

/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
#define HFI1_RCVCTRL_CTXT_ENB 0x04
#define HFI1_RCVCTRL_CTXT_DIS 0x08
#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
#define HFI1_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
#define HFI1_RCVCTRL_PKEY_DIS 0x80
#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000

/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN	0x1
#define HFI1_PART_ENFORCE_OUT	0x2

/* how often we check for synthetic counter wrap around */
#define SYNTH_CNT_TIME 3

/* Counter flags */
#define CNTR_NORMAL		0x0 /* Normal counters, just read register */
#define CNTR_SYNTH		0x1 /* Synthetic counters, saturate at all 1s */
#define CNTR_DISABLED		0x2 /* Disable this counter */
#define CNTR_32BIT		0x4 /* Simulate 64 bits for this counter */
#define CNTR_VL			0x8 /* Per VL counter */
#define CNTR_SDMA              0x10
#define CNTR_INVALID_VL		-1  /* Specifies invalid VL */
#define CNTR_MODE_W		0x0
#define CNTR_MODE_R		0x1

/* VLs Supported/Operational */
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8

#define HFI1_GUIDS_PER_PORT  5
#define HFI1_PORT_GUID_INDEX 0

static inline void incr_cntr64(u64 *cntr)
{
	if (*cntr < (u64)-1LL)
		(*cntr)++;
}

static inline void incr_cntr32(u32 *cntr)
{
	if (*cntr < (u32)-1LL)
		(*cntr)++;
}

#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
	enum irq_type type;
	int irq;
	void *arg;
	char name[MAX_NAME_SIZE];
	cpumask_t mask;
	struct irq_affinity_notify notify;
};

/* per-SL CCA information */
struct cca_timer {
	struct hrtimer hrtimer;
	struct hfi1_pportdata *ppd; /* read-only */
	int sl; /* read-only */
	u16 ccti; /* read/write - current value of CCTI */
};

struct link_down_reason {
	/*
	 * SMA-facing value.  Should be set from .latest when
	 * HLS_UP_* -> HLS_DN_* transition actually occurs.
	 */
	u8 sma;
	u8 latest;
};

enum {
	LO_PRIO_TABLE,
	HI_PRIO_TABLE,
	MAX_PRIO_TABLE
};

struct vl_arb_cache {
	/* protect vl arb cache */
	spinlock_t lock;
	struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};

/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {
	struct hfi1_ibport ibport_data;

	struct hfi1_devdata *dd;
	struct kobject pport_cc_kobj;
	struct kobject sc2vl_kobj;
	struct kobject sl2sc_kobj;
	struct kobject vl2mtu_kobj;

	/* PHY support */
	struct qsfp_data qsfp_info;
	/* Values for SI tuning of SerDes */
	u32 port_type;
	u32 tx_preset_eq;
	u32 tx_preset_noeq;
	u32 rx_preset;
	u8  local_atten;
	u8  remote_atten;
	u8  default_atten;
	u8  max_power_class;

	/* did we read platform config from scratch registers? */
	bool config_from_scratch;

	/* GUIDs for this interface, in host order, guids[0] is a port guid */
	u64 guids[HFI1_GUIDS_PER_PORT];

	/* GUID for peer interface, in host order */
	u64 neighbor_guid;

	/* up or down physical link state */
	u32 linkup;

	/*
	 * this address is mapped read-only into user processes so they can
	 * get status cheaply, whenever they want.  One qword of status per port
	 */
	u64 *statusp;

	/* SendDMA related entries */

	struct workqueue_struct *hfi1_wq;
	struct workqueue_struct *link_wq;

	/* move out of interrupt context */
	struct work_struct link_vc_work;
	struct work_struct link_up_work;
	struct work_struct link_down_work;
	struct work_struct sma_message_work;
	struct work_struct freeze_work;
	struct work_struct link_downgrade_work;
	struct work_struct link_bounce_work;
	struct delayed_work start_link_work;
	/* host link state variables */
	struct mutex hls_lock;
	u32 host_link_state;

	/* these are the "32 bit" regs */

	u32 ibmtu; /* The MTU programmed for this unit */
	/*
	 * Current max size IB packet (in bytes) including IB headers, that
	 * we can send. Changes when ibmtu changes.
	 */
	u32 ibmaxlen;
	u32 current_egress_rate; /* units [10^6 bits/sec] */
	/* LID programmed for this instance */
	u32 lid;
	/* list of pkeys programmed; 0 if not set */
	u16 pkeys[MAX_PKEY_VALUES];
	u16 link_width_supported;
	u16 link_width_downgrade_supported;
	u16 link_speed_supported;
	u16 link_width_enabled;
	u16 link_width_downgrade_enabled;
	u16 link_speed_enabled;
	u16 link_width_active;
	u16 link_width_downgrade_tx_active;
	u16 link_width_downgrade_rx_active;
	u16 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;
	u8 actual_vls_operational;
	/* LID mask control */
	u8 lmc;
	/* Rx Polarity inversion (compensate for ~tx on partner) */
	u8 rx_pol_inv;

	u8 hw_pidx;     /* physical port index */
	u8 port;        /* IB port number and index into dd->pports - 1 */
	/* type of neighbor node */
	u8 neighbor_type;
	u8 neighbor_normal;
	u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
	u8 neighbor_port_number;
	u8 is_sm_config_started;
	u8 offline_disabled_reason;
	u8 is_active_optimize_enabled;
	u8 driver_link_ready;	/* driver ready for active link */
	u8 link_enabled;	/* link enabled? */
	u8 linkinit_reason;
	u8 local_tx_rate;	/* rate given to 8051 firmware */
	u8 qsfp_retry_count;

	/* placeholders for IB MAD packet settings */
	u8 overrun_threshold;
	u8 phy_error_threshold;
	unsigned int is_link_down_queued;

	/* Used to override LED behavior for things like maintenance beaconing */
	/*
	 * Alternates per phase of blink
	 * [0] holds LED off duration, [1] holds LED on duration
	 */
	unsigned long led_override_vals[2];
	u8 led_override_phase; /* LSB picks from vals[] */
	atomic_t led_override_timer_active;
	/* Used to flash LEDs in override mode */
	struct timer_list led_override_timer;

	u32 sm_trap_qp;
	u32 sa_qp;

	/*
	 * cca_timer_lock protects access to the per-SL cca_timer
	 * structures (specifically the ccti member).
	 */
	spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
	struct cca_timer cca_timer[OPA_MAX_SLS];

	/* List of congestion control table entries */
	struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];

	/* congestion entries, each entry corresponding to a SL */
	struct opa_congestion_setting_entry_shadow
		congestion_entries[OPA_MAX_SLS];

	/*
	 * cc_state_lock protects (write) access to the per-port
	 * struct cc_state.
	 */
	spinlock_t cc_state_lock ____cacheline_aligned_in_smp;

	struct cc_state __rcu *cc_state;

	/* Total number of congestion control table entries */
	u16 total_cct_entry;

	/* Bit map identifying service level */
	u32 cc_sl_control_map;

	/* CA's max number of 64 entry units in the congestion control table */
	u8 cc_max_table_entries;

	/*
	 * begin congestion log related entries
	 * cc_log_lock protects all congestion log related data
	 */
	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
	u16 threshold_event_counter;
	struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
	int cc_log_idx; /* index for logging events */
	int cc_mad_idx; /* index for reporting events */
	/* end congestion log related entries */

	struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];

	/* port relative counter buffer */
	u64 *cntrs;
	/* port relative synthetic counter buffer */
	u64 *scntrs;
	/* port_xmit_discards are synthesized from different egress errors */
	u64 port_xmit_discards;
	u64 port_xmit_discards_vl[C_VL_COUNT];
	u64 port_xmit_constraint_errors;
	u64 port_rcv_constraint_errors;
	/* count of 'link_err' interrupts from DC */
	u64 link_downed;
	/* number of times link retrained successfully */
	u64 link_up;
	/* number of times a link unknown frame was reported */
	u64 unknown_frame_count;
	/* port_ltp_crc_mode is returned in 'portinfo' MADs */
	u16 port_ltp_crc_mode;
	/* port_crc_mode_enabled is the crc we support */
	u8 port_crc_mode_enabled;
	/* mgmt_allowed is also returned in 'portinfo' MADs */
	u8 mgmt_allowed;
	u8 part_enforce; /* partition enforcement flags */
	struct link_down_reason local_link_down_reason;
	struct link_down_reason neigh_link_down_reason;
	/* Value to be sent to link peer on LinkDown. */
	u8 remote_link_down_reason;
	/* Error events that will cause a port bounce. */
	u32 port_error_action;
	struct work_struct linkstate_active_work;
	/* Does this port need to prescan for FECNs */
	bool cc_prescan;
};

typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);

typedef void (*opcode_handler)(struct hfi1_packet *packet);
typedef void (*hfi1_make_req)(struct rvt_qp *qp,
			      struct hfi1_pkt_state *ps,
			      struct rvt_swqe *wqe);

/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE  0	/* keep going */
#define RHF_RCV_DONE	  1	/* stop, this packet processed */
#define RHF_RCV_REPROCESS 2	/* stop. retain this packet */

struct rcv_array_data {
	u8 group_size;
	u16 ngroups;
	u16 nctxt_extra;
};

struct per_vl_data {
	u16 mtu;
	struct send_context *sc;
};

/* 16 to directly index */
#define PER_VL_SEND_CONTEXTS 16

struct err_info_rcvport {
	u8 status_and_code;
	u64 packet_flit1;
	u64 packet_flit2;
};

struct err_info_constraint {
	u8 status;
	u16 pkey;
	u32 slid;
};

struct hfi1_temp {
	unsigned int curr;       /* current temperature */
	unsigned int lo_lim;     /* low temperature limit */
	unsigned int hi_lim;     /* high temperature limit */
	unsigned int crit_lim;   /* critical temperature limit */
	u8 triggers;      /* temperature triggers */
};

struct hfi1_i2c_bus {
	struct hfi1_devdata *controlling_dd; /* current controlling device */
	struct i2c_adapter adapter;	/* bus details */
	struct i2c_algo_bit_data algo;	/* bus algorithm details */
	int num;			/* bus number, 0 or 1 */
};

/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
	struct hfi1_devdata *dds[2];	/* back pointers */
	struct mutex asic_resource_mutex;
	struct hfi1_i2c_bus *i2c_bus0;
	struct hfi1_i2c_bus *i2c_bus1;
};

/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES	 256
#define NUM_MAP_REGS      32

/*
 * Number of VNIC contexts used. Ensure it is less than or equal to
 * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
 */
#define HFI1_NUM_VNIC_CTXT   8

/* Number of VNIC RSM entries */
#define NUM_VNIC_MAP_ENTRIES 8

/* Virtual NIC information */
struct hfi1_vnic_data {
	struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
	struct kmem_cache *txreq_cache;
	u8 num_vports;
	struct idr vesw_idr;
	u8 rmt_start;
	u8 num_ctxt;
	u32 msix_idx;
};

struct hfi1_vnic_vport_info;

/* device data struct now contains only "general per-device" info.
 * fields related to a physical IB port are in a hfi1_pportdata struct.
 */
struct sdma_engine;
struct sdma_vl_map;

#define BOARD_VERS_MAX 96 /* how long the version string can be */
#define SERIAL_MAX 16 /* length of the serial number */

typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
	struct hfi1_ibdev verbs_dev;     /* must be first */
	struct list_head list;
	/* pointers to related structs for this device */
	/* pci access data structure */
	struct pci_dev *pcidev;
	struct cdev user_cdev;
	struct cdev diag_cdev;
	struct cdev ui_cdev;
	struct device *user_device;
	struct device *diag_device;
	struct device *ui_device;

	/* first mapping up to RcvArray */
	u8 __iomem *kregbase1;
	resource_size_t physaddr;

	/* second uncached mapping from RcvArray to pio send buffers */
	u8 __iomem *kregbase2;
	/* for detecting offset above kregbase2 address */
	u32 base2_start;

	/* Per VL data. Enough for all VLs but not all elements are set/used. */
	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
	/* send context data */
	struct send_context_info *send_contexts;
	/* map hardware send contexts to software index */
	u8 *hw_to_sw;
	/* spinlock for allocating and releasing send context resources */
	spinlock_t sc_lock;
	/* lock for pio_map */
	spinlock_t pio_map_lock;
	/* Send Context initialization lock. */
	spinlock_t sc_init_lock;
	/* lock for sdma_map */
	spinlock_t                          sde_map_lock;
	/* array of kernel send contexts */
	struct send_context **kernel_send_context;
	/* array of vl maps */
	struct pio_vl_map __rcu *pio_map;
	/* default flags to last descriptor */
	u64 default_desc1;

	/* fields common to all SDMA engines */

	volatile __le64                    *sdma_heads_dma; /* DMA'ed by chip */
	dma_addr_t                          sdma_heads_phys;
	void                               *sdma_pad_dma; /* DMA'ed by chip */
	dma_addr_t                          sdma_pad_phys;
	/* for deallocation */
	size_t                              sdma_heads_size;
	/* number from the chip */
	u32                                 chip_sdma_engines;
	/* num used */
	u32                                 num_sdma;
	/* array of engines sized by num_sdma */
	struct sdma_engine                 *per_sdma;
	/* array of vl maps */
	struct sdma_vl_map __rcu           *sdma_map;
	/* SPC freeze waitqueue and variable */
	wait_queue_head_t		  sdma_unfreeze_wq;
	atomic_t			  sdma_unfreeze_count;

	u32 lcb_access_count;		/* count of LCB users */

	/* common data between shared ASIC HFIs in this OS */
	struct hfi1_asic_data *asic_data;

	/* mem-mapped pointer to base of PIO buffers */
	void __iomem *piobase;
	/*
	 * write-combining mem-mapped pointer to base of RcvArray
	 * memory.
	 */
	void __iomem *rcvarray_wc;
	/*
	 * credit return base - a per-NUMA range of DMA address that
	 * the chip will use to update the per-context free counter
	 */
	struct credit_return_base *cr_base;

	/* send context numbers and sizes for each type */
	struct sc_config_sizes sc_sizes[SC_MAX];

	char *boardname; /* human readable board info */

	/* reset value */
	u64 z_int_counter;
	u64 z_rcv_limit;
	u64 z_send_schedule;

	u64 __percpu *send_schedule;
	/* number of receive contexts in use by the driver */
	u32 num_rcv_contexts;
	/* number of pio send contexts in use by the driver */
	u32 num_send_contexts;
	/*
	 * number of ctxts available for PSM open
	 */
	u32 freectxts;
	/* total number of available user/PSM contexts */
	u32 num_user_contexts;
	/* base receive interrupt timeout, in CSR units */
	u32 rcv_intr_timeout_csr;

	u32 freezelen; /* max length of freezemsg */
	u64 __iomem *egrtidbase;
	spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
	spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
	spinlock_t uctxt_lock; /* protect rcd changes */
	struct mutex dc8051_lock; /* exclusive access to 8051 */
	struct workqueue_struct *update_cntr_wq;
	struct work_struct update_cntr_work;
	/* exclusive access to 8051 memory */
	spinlock_t dc8051_memlock;
	int dc8051_timed_out;	/* remember if the 8051 timed out */
	/*
	 * A page that will hold event notification bitmaps for all
	 * contexts. This page will be mapped into all processes.
	 */
	unsigned long *events;
	/*
	 * per unit status, see also portdata statusp
	 * mapped read-only into user processes so they can get unit and
	 * IB link status cheaply
	 */
	struct hfi1_status *status;

	/* revision register shadow */
	u64 revision;
	/* Base GUID for device (network order) */
	u64 base_guid;

	/* these are the "32 bit" regs */

	/* value we put in kr_rcvhdrsize */
	u32 rcvhdrsize;
	/* number of receive contexts the chip supports */
	u32 chip_rcv_contexts;
	/* number of receive array entries */
	u32 chip_rcv_array_count;
	/* number of PIO send contexts the chip supports */
	u32 chip_send_contexts;
	/* number of bytes in the PIO memory buffer */
	u32 chip_pio_mem_size;
	/* number of bytes in the SDMA memory buffer */
	u32 chip_sdma_mem_size;

	/* size of each rcvegrbuffer */
	u32 rcvegrbufsize;
	/* log2 of above */
	u16 rcvegrbufsize_shift;
	/* both sides of the PCIe link are gen3 capable */
	u8 link_gen3_capable;
	/* default link down value (poll/sleep) */
	u8 link_default;
	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
	u32 lbus_width;
	/* localbus speed in MHz */
	u32 lbus_speed;
	int unit; /* unit # of this chip */
	int node; /* home node of this chip */

	/* save these PCI fields to restore after a reset */
	u32 pcibar0;
	u32 pcibar1;
	u32 pci_rom;
	u16 pci_command;
	u16 pcie_devctl;
	u16 pcie_lnkctl;
	u16 pcie_devctl2;
	u32 pci_msix0;
	u32 pci_lnkctl3;
	u32 pci_tph2;

	/*
	 * ASCII serial number, from flash, large enough for original
	 * all digit strings, and longer serial number format
	 */
	u8 serial[SERIAL_MAX];
	/* human readable board version */
	u8 boardversion[BOARD_VERS_MAX];
	u8 lbus_info[32]; /* human readable localbus info */
	/* chip major rev, from CceRevision */
	u8 majrev;
	/* chip minor rev, from CceRevision */
	u8 minrev;
	/* hardware ID */
	u8 hfi1_id;
	/* implementation code */
	u8 icode;
	/* vAU of this device */
	u8 vau;
	/* vCU of this device */
	u8 vcu;
	/* link credits of this device */
	u16 link_credits;
	/* initial vl15 credits to use */
	u16 vl15_init;

	/*
	 * Cached value for vl15buf, read during verify cap interrupt. VL15
	 * credits are to be kept at 0 and set when handling the link-up
	 * interrupt. This removes the possibility of receiving VL15 MAD
	 * packets before this HFI is ready.
	 */
	u16 vl15buf_cached;

	/* Misc small ints */
	u8 n_krcv_queues;
	u8 qos_shift;

	u16 irev;	/* implementation revision */
	u32 dc8051_ver; /* 8051 firmware version */

	spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
	struct platform_config platform_config;
	struct platform_config_cache pcfg_cache;

	struct diag_client *diag_client;

	/* MSI-X information */
	struct hfi1_msix_entry *msix_entries;
	u32 num_msix_entries;
	u32 first_dyn_msix_idx;

	/* INTx information */
	u32 requested_intx_irq;		/* did we request one? */
	char intx_name[MAX_NAME_SIZE];	/* INTx name */

	/* general interrupt: mask of handled interrupts */
	u64 gi_mask[CCE_NUM_INT_CSRS];

	struct rcv_array_data rcv_entries;

	/* cycle length of PS* counters in HW (in picoseconds) */
	u16 psxmitwait_check_rate;

	/*
	 * 64 bit synthetic counters
	 */
	struct timer_list synth_stats_timer;

	/*
	 * device counters
	 */
	char *cntrnames;
	size_t cntrnameslen;
	size_t ndevcntrs;
	u64 *cntrs;
	u64 *scntrs;

	/*
	 * remembered values for synthetic counters
	 */
	u64 last_tx;
	u64 last_rx;

	/*
	 * per-port counters
	 */
	size_t nportcntrs;
	char *portcntrnames;
	size_t portcntrnameslen;

	struct err_info_rcvport err_info_rcvport;
	struct err_info_constraint err_info_rcv_constraint;
	struct err_info_constraint err_info_xmit_constraint;

	atomic_t drop_packet;
	u8 do_drop;
	u8 err_info_uncorrectable;
	u8 err_info_fmconfig;

	/*
	 * Software counters for the status bits defined by the
	 * associated error status registers
	 */
	u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
	u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
	u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
	u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
	u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
	u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
	u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];

	/* Software counter that spans all contexts */
	u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
	/* Software counter that spans all DMA engines */
	u64 sw_send_dma_eng_err_status_cnt[
		NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
	/* Software counter that aggregates all cce_err_status errors */
	u64 sw_cce_err_status_aggregate;
	/* Software counter that aggregates all bypass packet rcv errors */
	u64 sw_rcv_bypass_packet_errors;
	/* receive interrupt function */
	rhf_rcv_function_ptr normal_rhf_rcv_functions[8];

	/* Save the enabled LCB error bits */
	u64 lcb_err_en;

	/*
	 * Capability to have different send engines simply by changing a
	 * pointer value.
	 */
	send_routine process_pio_send ____cacheline_aligned_in_smp;
	send_routine process_dma_send;
	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
				u64 pbc, const void *from, size_t count);
	int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
				     struct hfi1_vnic_vport_info *vinfo,
				     struct sk_buff *skb, u64 pbc, u8 plen);
	/* hfi1_pportdata, points to array of (physical) port-specific
	 * data structs, indexed by pidx (0..n-1)
	 */
	struct hfi1_pportdata *pport;
	/* receive context data */
	struct hfi1_ctxtdata **rcd;
	u64 __percpu *int_counter;
	/* device (not port) flags, basically device capabilities */
	u16 flags;
	/* Number of physical ports available */
	u8 num_pports;
	/* Lowest context number which can be used by user processes or VNIC */
	u8 first_dyn_alloc_ctxt;
	/* adding a new field here would make it part of this cacheline */

	/* seqlock for sc2vl */
	seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
	u64 sc2vl[4];
	/* receive interrupt functions */
	rhf_rcv_function_ptr *rhf_rcv_function_map;
	u64 __percpu *rcv_limit;
	u16 rhf_offset; /* offset of RHF within receive header entry */
	/* adding a new field here would make it part of this cacheline */

	/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
	u8 oui1;
	u8 oui2;
	u8 oui3;
	u8 dc_shutdown;

	/* Timer and counter used to detect RcvBufOvflCnt changes */
	struct timer_list rcverr_timer;

	wait_queue_head_t event_queue;

	/* receive context tail dummy address */
	__le64 *rcvhdrtail_dummy_kvaddr;
	dma_addr_t rcvhdrtail_dummy_dma;

	u32 rcv_ovfl_cnt;
	/* Serialize ASPM enable/disable between multiple verbs contexts */
	spinlock_t aspm_lock;
	/* Number of verbs contexts which have disabled ASPM */
	atomic_t aspm_disabled_cnt;
	/* Keeps track of user space clients */
	atomic_t user_refcount;
	/* Used to wait for outstanding user space clients before dev removal */
	struct completion user_comp;

	bool eprom_available;	/* true if EPROM is available for this device */
	bool aspm_supported;	/* Does HW support ASPM */
	bool aspm_enabled;	/* ASPM state: enabled/disabled */
	struct rhashtable *sdma_rht;

	struct kobject kobj;

	/* vnic data */
	struct hfi1_vnic_data vnic;
};

static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
{
	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
}

/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
#define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
#define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
#define dc8051_ver_patch(a) ((a) & 0x0000ff)
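/*
 * Worked example (illustrative, not part of the original source): the
 * 8051 version is packed one byte per field, major.minor.patch, so
 * dc8051_ver(1, 27, 0) yields 0x011b00, which unpacks as
 * dc8051_ver_maj() == 1, dc8051_ver_min() == 27, dc8051_ver_patch() == 0.
 */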

/* f_put_tid types */
#define PT_EXPECTED       0
#define PT_EAGER          1
#define PT_INVALID_FLUSH  2
#define PT_INVALID        3

struct tid_rb_node;
struct mmu_rb_node;
struct mmu_rb_handler;

/* Private data for file operations */
struct hfi1_filedata {
	struct hfi1_devdata *dd;
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	u16 subctxt;
	/* for cpu affinity; -1 if none */
	int rec_cpu_num;
	u32 tid_n_pinned;
	struct mmu_rb_handler *handler;
	struct tid_rb_node **entry_to_rb;
	spinlock_t tid_lock; /* protect tid_[limit,used] counters */
	u32 tid_limit;
	u32 tid_used;
	u32 *invalid_tids;
	u32 invalid_tid_idx;
	/* protect invalid_tids array and invalid_tid_idx */
	spinlock_t invalid_lock;
	struct mm_struct *mm;
};

extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
struct hfi1_devdata *hfi1_lookup(int unit);
extern u32 hfi1_cpulist_count;
extern unsigned long *hfi1_cpulist;

int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);

int hfi1_diag_add(struct hfi1_devdata *dd);
void hfi1_diag_remove(struct hfi1_devdata *dd);
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);

void handle_user_interrupt(struct hfi1_ctxtdata *rcd);

int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_kctxts(struct hfi1_devdata *dd);
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);

extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
			 struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe);

void hfi1_make_ud_req_16B(struct rvt_qp *qp,
			  struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe);

/* receive packet handler dispositions */
#define RCV_PKT_OK      0x0 /* keep going */
#define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
#define RCV_PKT_DONE    0x2 /* stop, no more packets detected */

/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
}

int hfi1_reset_device(int);

void receive_interrupt_work(struct work_struct *work);

/* extract service channel from header and rhf */
static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
{
	return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
}

#define HFI1_JKEY_WIDTH       16
#define HFI1_JKEY_MASK        (BIT(16) - 1)
#define HFI1_ADMIN_JKEY_RANGE 32

/*
 * J_KEYs are split and allocated in the following groups:
 *   0 - 31    - users with administrator privileges
 *  32 - 63    - kernel protocols using KDETH packets
 *  64 - 65535 - all other users using KDETH packets
 */
static inline u16 generate_jkey(kuid_t uid)
{
	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;

	if (capable(CAP_SYS_ADMIN))
		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
	else if (jkey < 64)
		jkey |= BIT(HFI1_JKEY_WIDTH - 1);

	return jkey;
}
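
/*
 * Worked examples (illustrative, not part of the original source):
 *  - admin (CAP_SYS_ADMIN), any uid: the jkey is masked into 0..31,
 *    e.g. uid 1000 -> 1000 & 31 = 8.
 *  - non-admin, uid 50: 50 < 64 would collide with the kernel range,
 *    so bit 15 is set -> jkey = 0x8032.
 *  - non-admin, uid 1000: already >= 64 -> jkey = 1000, unchanged.
 */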

/*
 * active_egress_rate
 *
 * returns the active egress rate in units of [10^6 bits/sec]
 */
static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
{
	u16 link_speed = ppd->link_speed_active;
	u16 link_width = ppd->link_width_active;
	u32 egress_rate;

	if (link_speed == OPA_LINK_SPEED_25G)
		egress_rate = 25000;
	else /* assume OPA_LINK_SPEED_12_5G */
		egress_rate = 12500;

	switch (link_width) {
	case OPA_LINK_WIDTH_4X:
		egress_rate *= 4;
		break;
	case OPA_LINK_WIDTH_3X:
		egress_rate *= 3;
		break;
	case OPA_LINK_WIDTH_2X:
		egress_rate *= 2;
		break;
	default:
		/* assume IB_WIDTH_1X */
		break;
	}

	return egress_rate;
}
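
/*
 * Example (illustrative, not part of the original source): a fully-up
 * link at 25 Gb/s per lane and 4X width yields 25000 * 4 = 100000,
 * i.e. 100 Gb/s of egress bandwidth.
 */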

/*
 * egress_cycles
 *
 * Returns the number of 'fabric clock cycles' to egress a packet
 * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
 * rate is (approximately) 805 MHz, the units of the returned value
 * are (1/805 MHz).
 */
static inline u32 egress_cycles(u32 len, u32 rate)
{
	u32 cycles;

	/*
	 * cycles is:
	 *
	 *          (length) [bits] / (rate) [bits/sec]
	 *  ---------------------------------------------------
	 *  fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
	 */

	cycles = len * 8; /* bits */
	cycles *= 805;
	cycles /= rate;

	return cycles;
}
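
/*
 * Worked example (illustrative, not part of the original source): a
 * 4096-byte packet at 100000 Mbit/s (100 Gb/s, see active_egress_rate()
 * above) takes 4096 * 8 * 805 / 100000 = ~263 fabric clock cycles,
 * i.e. roughly 327 ns at 805 MHz.
 */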

void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh);
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh);
typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
				u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
				u8 sc5, const struct ib_grh *old_grh);

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};
#define PKEY_CHECK_INVALID -1
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index);

#define PACKET_EGRESS_TIMEOUT 350
static inline void pause_for_credit_return(struct hfi1_devdata *dd)
{
	/* Pause at least 1us, to ensure chip returns all credits */
	u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;

	udelay(usec ? usec : 1);
}

/**
 * sc_to_vlt() - reverse lookup sc to vl
 * @dd: devdata
 * @sc5: 5 bit sc
 */
static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
{
	unsigned seq;
	u8 rval;

	if (sc5 >= OPA_MAX_SCS)
		return (u8)(0xff);

	do {
		seq = read_seqbegin(&dd->sc2vl_lock);
		rval = *(((u8 *)dd->sc2vl) + sc5);
	} while (read_seqretry(&dd->sc2vl_lock, seq));

	return rval;
}
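
/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *	u8 vl = sc_to_vlt(dd, sc5);
 *
 *	if (vl == 0xff)
 *		... sc5 was out of range ...
 *
 * The read_seqbegin()/read_seqretry() loop above rereads the table if a
 * writer updated sc2vl concurrently, so the returned value is always a
 * consistent byte from the 32-entry map.
 */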

#define PKEY_MEMBER_MASK 0x8000
#define PKEY_LOW_15_MASK 0x7fff

/*
 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for ingress partition keys
 * specified in the OPAv1 spec., section 9.10.14.
 */
static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is clear (limited partition member),
		 * the match is valid only if bit 15 of the
		 * corresponding table element is set (full member):
		 * two limited members cannot communicate.
		 */
		if (!(pkey & PKEY_MEMBER_MASK))
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
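
/*
 * Examples (illustrative, not part of the original source), per the
 * OPAv1 matching rules encoded above:
 *   pkey 0x8001 vs ent 0x0001 -> 1 (full member sender always matches)
 *   pkey 0x0001 vs ent 0x8001 -> 1 (limited member needs a full entry)
 *   pkey 0x0001 vs ent 0x0001 -> 0 (two limited members cannot talk)
 *   pkey 0x8001 vs ent 0x0002 -> 0 (low 15 bits differ)
 */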

/*
 * ingress_pkey_table_search - search the entire pkey table for
 * an entry which matches 'pkey'. return 0 if a match is found,
 * and 1 otherwise.
 */
static inline int ingress_pkey_table_search(struct hfi1_pportdata *ppd,
					    u16 pkey)
{
	int i;

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
	return 1;
}

/*
 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
 * i.e., increment port_rcv_constraint_errors for the port, and record
 * the 'error info' for this failure.
 */
static inline void ingress_pkey_table_fail(struct hfi1_pportdata *ppd,
					   u16 pkey, u16 slid)
1623 {
1624 	struct hfi1_devdata *dd = ppd->dd;
1625 
1626 	incr_cntr64(&ppd->port_rcv_constraint_errors);
1627 	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
1628 		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
1629 		dd->err_info_rcv_constraint.slid = slid;
1630 		dd->err_info_rcv_constraint.pkey = pkey;
1631 	}
1632 }
1633 
1634 /*
1635  * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
1636  * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
1637  * is a hint as to the best place in the partition key table to begin
1638  * searching. For performance reasons, this function should not be called
1639  * on the data path; there the pkey check is expected to be done by HW,
1640  * and rcv_pkey_check() should be called instead.
1641  */
1642 static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
1643 				     u8 sc5, u8 idx, u32 slid, bool force)
1644 {
1645 	if (!force && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
1646 		return 0;
1647 
1648 	/* If SC15, pkey[0:14] must be 0x7fff */
1649 	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1650 		goto bad;
1651 
1652 	/* Is the pkey 0x0 or 0x8000 (i.e. the low 15 bits are all zero)? */
1653 	if ((pkey & PKEY_LOW_15_MASK) == 0)
1654 		goto bad;
1655 
1656 	/* The most likely matching pkey has index 'idx' */
1657 	if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
1658 		return 0;
1659 
1660 	/* no match - try the whole table */
1661 	if (!ingress_pkey_table_search(ppd, pkey))
1662 		return 0;
1663 
1664 bad:
1665 	ingress_pkey_table_fail(ppd, pkey, slid);
1666 	return 1;
1667 }
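
/*
 * Example caller (a sketch only; the surrounding handler and the
 * variable names are hypothetical):
 *
 *	if (ingress_pkey_check(ppd, pkey, sc5, qp->s_pkey_index,
 *			       slid, false))
 *		return;	(drop; the counter and error info were
 *			 already recorded by ingress_pkey_table_fail())
 */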
1668 
1669 /*
1670  * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
1671  * otherwise. It only ensures the pkey is valid for QP0. This function
1672  * should be called on the data path instead of ingress_pkey_check()
1673  * since there the pkey check is done by HW (except for QP0).
1674  */
1675 static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
1676 				 u8 sc5, u16 slid)
1677 {
1678 	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
1679 		return 0;
1680 
1681 	/* If SC15, pkey[0:14] must be 0x7fff */
1682 	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1683 		goto bad;
1684 
1685 	return 0;
1686 bad:
1687 	ingress_pkey_table_fail(ppd, pkey, slid);
1688 	return 1;
1689 }
1690 
1691 /* MTU handling */
1692 
1693 /* MTU enumeration, 256-4k match IB */
1694 #define OPA_MTU_0     0
1695 #define OPA_MTU_256   1
1696 #define OPA_MTU_512   2
1697 #define OPA_MTU_1024  3
1698 #define OPA_MTU_2048  4
1699 #define OPA_MTU_4096  5
1700 
1701 u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
1702 int mtu_to_enum(u32 mtu, int default_if_bad);
1703 u16 enum_to_mtu(int mtu);
1704 static inline int valid_ib_mtu(unsigned int mtu)
1705 {
1706 	return mtu == 256 || mtu == 512 ||
1707 		mtu == 1024 || mtu == 2048 ||
1708 		mtu == 4096;
1709 }
1710 
1711 static inline int valid_opa_max_mtu(unsigned int mtu)
1712 {
1713 	return mtu >= 2048 &&
1714 		(valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
1715 }
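
/*
 * Example (a sketch of the expected mappings; the second argument of
 * mtu_to_enum() is returned for MTUs that have no encoding):
 *
 *	mtu_to_enum(4096, OPA_MTU_0)	returns OPA_MTU_4096
 *	enum_to_mtu(OPA_MTU_2048)	returns 2048
 *	mtu_to_enum(3000, OPA_MTU_0)	returns OPA_MTU_0
 */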
1716 
1717 int set_mtu(struct hfi1_pportdata *ppd);
1718 
1719 int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
1720 void hfi1_disable_after_error(struct hfi1_devdata *dd);
1721 int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
1722 int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
1723 
1724 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
1725 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
1726 
1727 void set_up_vau(struct hfi1_devdata *dd, u8 vau);
1728 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
1729 void reset_link_credits(struct hfi1_devdata *dd);
1730 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
1731 
1732 int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
1733 
1734 static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
1735 {
1736 	return ppd->dd;
1737 }
1738 
1739 static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
1740 {
1741 	return container_of(dev, struct hfi1_devdata, verbs_dev);
1742 }
1743 
1744 static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
1745 {
1746 	return dd_from_dev(to_idev(ibdev));
1747 }
1748 
1749 static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
1750 {
1751 	return container_of(ibp, struct hfi1_pportdata, ibport_data);
1752 }
1753 
1754 static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
1755 {
1756 	return container_of(rdi, struct hfi1_ibdev, rdi);
1757 }
1758 
1759 static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
1760 {
1761 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1762 	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
1763 
1764 	WARN_ON(pidx >= dd->num_pports);
1765 	return &dd->pport[pidx].ibport_data;
1766 }
1767 
1768 static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
1769 {
1770 	return &rcd->ppd->ibport_data;
1771 }
1772 
1773 void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
1774 			       bool do_cnp);
1775 static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
1776 			       bool do_cnp)
1777 {
1778 	struct ib_other_headers *ohdr = pkt->ohdr;
1779 
1780 	u32 bth1;
1781 	bool becn = false;
1782 	bool fecn = false;
1783 
1784 	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
1785 		fecn = hfi1_16B_get_fecn(pkt->hdr);
1786 		becn = hfi1_16B_get_becn(pkt->hdr);
1787 	} else {
1788 		bth1 = be32_to_cpu(ohdr->bth[1]);
1789 		fecn = bth1 & IB_FECN_SMASK;
1790 		becn = bth1 & IB_BECN_SMASK;
1791 	}
1792 	if (unlikely(fecn || becn)) {
1793 		hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
1794 		return fecn;
1795 	}
1796 	return false;
1797 }
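
/*
 * Example (a sketch of a typical receive-path caller; whether a CNP
 * should be generated is protocol specific):
 *
 *	bool is_fecn = process_ecn(qp, packet, true);
 *
 * If is_fecn is set, the eventual response packet should carry the
 * reflected FECN so the requester learns of the congestion.
 */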
1798 
1799 /*
1800  * Return the indexed PKEY from the port PKEY table.
1801  */
1802 static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
1803 {
1804 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1805 	u16 ret;
1806 
1807 	if (index >= ARRAY_SIZE(ppd->pkeys))
1808 		ret = 0;
1809 	else
1810 		ret = ppd->pkeys[index];
1811 
1812 	return ret;
1813 }
1814 
1815 /*
1816  * Return the indexed GUID from the port GUIDs table.
1817  */
1818 static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
1819 {
1820 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1821 
1822 	WARN_ON(index >= HFI1_GUIDS_PER_PORT);
1823 	return cpu_to_be64(ppd->guids[index]);
1824 }
1825 
1826 /*
1827  * Called by readers of cc_state only; must be called under rcu_read_lock().
1828  */
1829 static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
1830 {
1831 	return rcu_dereference(ppd->cc_state);
1832 }
1833 
1834 /*
1835  * Called by writers of cc_state only; must be called with cc_state_lock held.
1836  */
1837 static inline
1838 struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
1839 {
1840 	return rcu_dereference_protected(ppd->cc_state,
1841 					 lockdep_is_held(&ppd->cc_state_lock));
1842 }
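
/*
 * Example usage (a sketch of the RCU pattern these helpers expect; the
 * work done inside the read-side critical section is illustrative):
 *
 *	rcu_read_lock();
 *	cc_state = get_cc_state(ppd);
 *	if (cc_state)
 *		(read congestion control tables from cc_state)
 *	rcu_read_unlock();
 *
 * Writers take ppd->cc_state_lock, fetch the current pointer with
 * get_cc_state_protected(), and publish a replacement with
 * rcu_assign_pointer().
 */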
1843 
1844 /*
1845  * values for dd->flags (_device_ related flags)
1846  */
1847 #define HFI1_INITTED           0x1    /* chip and driver up and initted */
1848 #define HFI1_PRESENT           0x2    /* chip accesses can be done */
1849 #define HFI1_FROZEN            0x4    /* chip in SPC freeze */
1850 #define HFI1_HAS_SDMA_TIMEOUT  0x8
1851 #define HFI1_HAS_SEND_DMA      0x10   /* Supports Send DMA */
1852 #define HFI1_FORCED_FREEZE     0x80   /* driver forced freeze mode */
1853 
1854 /* IB dword length mask in PBC (lower 11 bits); same for all chips */
1855 #define HFI1_PBC_LENGTH_MASK                     ((1 << 11) - 1)
1856 
1857 /* ctxt_flag bit offsets */
1858 		/* base context has not finished initializing */
1859 #define HFI1_CTXT_BASE_UNINIT 1
1860 		/* base context initialization failed */
1861 #define HFI1_CTXT_BASE_FAILED 2
1862 		/* waiting for a packet to arrive */
1863 #define HFI1_CTXT_WAITING_RCV 3
1864 		/* waiting for an urgent packet to arrive */
1865 #define HFI1_CTXT_WAITING_URG 4
1866 /* free up any allocated data at close */
1867 /* free up any allocated data at closes */
1868 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
1869 				  const struct pci_device_id *ent);
1870 void hfi1_free_devdata(struct hfi1_devdata *dd);
1871 struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
1872 
1873 /* LED beaconing functions */
1874 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
1875 			     unsigned int timeoff);
1876 void shutdown_led_override(struct hfi1_pportdata *ppd);
1877 
1878 #define HFI1_CREDIT_RETURN_RATE (100)
1879 
1880 /*
1881  * The number of words for the KDETH protocol field.  If this is
1882  * larger than the actual field used, then part of the payload
1883  * will be in the header.
1884  *
1885  * Optimally, we want this sized so that a typical case will
1886  * use full cache lines.  The typical local KDETH header would
1887  * be:
1888  *
1889  *	Bytes	Field
1890  *	  8	LRH
1891  *	 12	BTH
1892  *	 ??	KDETH
1893  *	  8	RHF
1894  *	---
1895  *	 28 + KDETH
1896  *
1897  * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
1898  */
1899 #define DEFAULT_RCVHDRSIZE 9
1900 
1901 /*
1902  * Maximal header byte count:
1903  *
1904  *	Bytes	Field
1905  *	  8	LRH
1906  *	 40	GRH (optional)
1907  *	 12	BTH
1908  *	 ??	KDETH
1909  *	  8	RHF
1910  *	---
1911  *	 68 + KDETH
1912  *
1913  * We also want to maintain a cache line alignment to assist DMA'ing
1914  * of the header bytes.  Round up to a good size.
1915  */
1916 #define DEFAULT_RCVHDR_ENTSIZE 32
1917 
1918 bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
1919 			u32 nlocked, u32 npages);
1920 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
1921 			    size_t npages, bool writable, struct page **pages);
1922 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
1923 			     size_t npages, bool dirty);
1924 
1925 static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
1926 {
1927 	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
1928 }
1929 
1930 static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
1931 {
1932 	/*
1933 	 * Volatile because it's a DMA target from the chip, the routine is
1934 	 * inlined, and we don't want register caching or reordering.
1935 	 */
1936 	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
1937 }
1938 
1939 /*
1940  * sysfs interface.
1941  */
1942 
1943 extern const char ib_hfi1_version[];
1944 
1945 int hfi1_device_create(struct hfi1_devdata *dd);
1946 void hfi1_device_remove(struct hfi1_devdata *dd);
1947 
1948 int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
1949 			   struct kobject *kobj);
1950 int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
1951 void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
1952 /* Hook for sysfs read of QSFP */
1953 int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
1954 
1955 int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
1956 void hfi1_pcie_cleanup(struct pci_dev *pdev);
1957 int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
1958 void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
1959 int pcie_speeds(struct hfi1_devdata *dd);
1960 int request_msix(struct hfi1_devdata *dd, u32 msireq);
1961 int restore_pci_variables(struct hfi1_devdata *dd);
1962 int save_pci_variables(struct hfi1_devdata *dd);
1963 int do_pcie_gen3_transition(struct hfi1_devdata *dd);
1964 int parse_platform_config(struct hfi1_devdata *dd);
1965 int get_platform_config_field(struct hfi1_devdata *dd,
1966 			      enum platform_config_table_type_encoding
1967 			      table_type, int table_index, int field_index,
1968 			      u32 *data, u32 len);
1969 
1970 const char *get_unit_name(int unit);
1971 const char *get_card_name(struct rvt_dev_info *rdi);
1972 struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
1973 
1974 /*
1975  * Flush write combining store buffers (if present) and perform a write
1976  * barrier.
1977  */
1978 static inline void flush_wc(void)
1979 {
1980 	asm volatile("sfence" : : : "memory");
1981 }
1982 
1983 void handle_eflags(struct hfi1_packet *packet);
1984 int process_receive_ib(struct hfi1_packet *packet);
1985 int process_receive_bypass(struct hfi1_packet *packet);
1986 int process_receive_error(struct hfi1_packet *packet);
1987 int kdeth_process_expected(struct hfi1_packet *packet);
1988 int kdeth_process_eager(struct hfi1_packet *packet);
1989 int process_receive_invalid(struct hfi1_packet *packet);
1990 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
1991 
1992 /* global module parameter variables */
1993 extern unsigned int hfi1_max_mtu;
1994 extern unsigned int hfi1_cu;
1995 extern unsigned int user_credit_return_threshold;
1996 extern int num_user_contexts;
1997 extern unsigned long n_krcvqs;
1998 extern uint krcvqs[];
1999 extern int krcvqsset;
2000 extern uint kdeth_qp;
2001 extern uint loopback;
2002 extern uint quick_linkup;
2003 extern uint rcv_intr_timeout;
2004 extern uint rcv_intr_count;
2005 extern uint rcv_intr_dynamic;
2006 extern ushort link_crc_mask;
2007 
2008 extern struct mutex hfi1_mutex;
2009 
2010 /* Number of seconds before our card status check...  */
2011 #define STATUS_TIMEOUT 60
2012 
2013 #define DRIVER_NAME		"hfi1"
2014 #define HFI1_USER_MINOR_BASE     0
2015 #define HFI1_TRACE_MINOR         127
2016 #define HFI1_NMINORS             255
2017 
2018 #define PCI_VENDOR_ID_INTEL 0x8086
2019 #define PCI_DEVICE_ID_INTEL0 0x24f0
2020 #define PCI_DEVICE_ID_INTEL1 0x24f1
2021 
2022 #define HFI1_PKT_USER_SC_INTEGRITY					    \
2023 	(SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK	    \
2024 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK		\
2025 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK		    \
2026 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)
2027 
2028 #define HFI1_PKT_KERNEL_SC_INTEGRITY					    \
2029 	(SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)
2030 
2031 static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
2032 						  u16 ctxt_type)
2033 {
2034 	u64 base_sc_integrity;
2035 
2036 	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
2037 	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
2038 		return 0;
2039 
2040 	base_sc_integrity =
2041 	SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
2042 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
2043 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
2044 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
2045 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
2046 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
2047 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
2048 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
2049 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
2050 	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
2051 	| SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
2052 	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
2053 	| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
2054 	| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
2055 	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
2056 	| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
2057 
2058 	if (ctxt_type == SC_USER)
2059 		base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
2060 	else
2061 		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
2062 
2063 	/* turn on send-side job key checks if !A0 */
2064 	if (!is_ax(dd))
2065 		base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
2066 
2067 	return base_sc_integrity;
2068 }
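
/*
 * Example (a sketch; it assumes the write_kctxt_csr() helper used
 * elsewhere in the driver and a previously allocated hardware send
 * context index):
 *
 *	u64 mask = hfi1_pkt_default_send_ctxt_mask(dd, SC_USER);
 *
 *	write_kctxt_csr(dd, hw_context, SEND_CTXT_CHECK_ENABLE, mask);
 */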
2069 
2070 static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
2071 {
2072 	u64 base_sdma_integrity;
2073 
2074 	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
2075 	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
2076 		return 0;
2077 
2078 	base_sdma_integrity =
2079 	SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
2080 	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
2081 	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
2082 	| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
2083 	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
2084 	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
2085 	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
2086 	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
2087 	| SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
2088 	| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
2089 	| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
2090 	| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
2091 	| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
2092 	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
2093 
2094 	if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
2095 		base_sdma_integrity |=
2096 		SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
2097 
2098 	/* turn on send-side job key checks if !A0 */
2099 	if (!is_ax(dd))
2100 		base_sdma_integrity |=
2101 			SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
2102 
2103 	return base_sdma_integrity;
2104 }
2105 
2106 /*
2107  * hfi1_early_err is used (only!) to print early errors before devdata is
2108  * allocated, or when dd->pcidev may not be valid, and at the tail end of
2109  * cleanup when devdata may have been freed, etc.  hfi1_dev_porterr is
2110  * the same as dd_dev_err, but is used when the message really needs
2111  * the IB port# to be definitive as to what's happening.
2112  */
2113 #define hfi1_early_err(dev, fmt, ...) \
2114 	dev_err(dev, fmt, ##__VA_ARGS__)
2115 
2116 #define hfi1_early_info(dev, fmt, ...) \
2117 	dev_info(dev, fmt, ##__VA_ARGS__)
2118 
2119 #define dd_dev_emerg(dd, fmt, ...) \
2120 	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
2121 		  get_unit_name((dd)->unit), ##__VA_ARGS__)
2122 
2123 #define dd_dev_err(dd, fmt, ...) \
2124 	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
2125 			get_unit_name((dd)->unit), ##__VA_ARGS__)
2126 
2127 #define dd_dev_err_ratelimited(dd, fmt, ...) \
2128 	dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2129 			get_unit_name((dd)->unit), ##__VA_ARGS__)
2130 
2131 #define dd_dev_warn(dd, fmt, ...) \
2132 	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
2133 			get_unit_name((dd)->unit), ##__VA_ARGS__)
2134 
2135 #define dd_dev_warn_ratelimited(dd, fmt, ...) \
2136 	dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2137 			get_unit_name((dd)->unit), ##__VA_ARGS__)
2138 
2139 #define dd_dev_info(dd, fmt, ...) \
2140 	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
2141 			get_unit_name((dd)->unit), ##__VA_ARGS__)
2142 
2143 #define dd_dev_info_ratelimited(dd, fmt, ...) \
2144 	dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2145 			get_unit_name((dd)->unit), ##__VA_ARGS__)
2146 
2147 #define dd_dev_dbg(dd, fmt, ...) \
2148 	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
2149 		get_unit_name((dd)->unit), ##__VA_ARGS__)
2150 
2151 #define hfi1_dev_porterr(dd, port, fmt, ...) \
2152 	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
2153 			get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
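
/*
 * Example (illustrative messages only):
 *
 *	dd_dev_err(dd, "Failed to allocate receive header queue\n");
 *	hfi1_dev_porterr(dd, 1, "link down unexpectedly\n");
 */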
2154 
2155 /*
2156  * this is used for formatting hw error messages...
2157  */
2158 struct hfi1_hwerror_msgs {
2159 	u64 mask;
2160 	const char *msg;
2161 	size_t sz;
2162 };
2163 
2164 /* in intr.c... */
2165 void hfi1_format_hwerrors(u64 hwerrs,
2166 			  const struct hfi1_hwerror_msgs *hwerrmsgs,
2167 			  size_t nhwerrmsgs, char *msg, size_t lmsg);
2168 
2169 #define USER_OPCODE_CHECK_VAL 0xC0
2170 #define USER_OPCODE_CHECK_MASK 0xC0
2171 #define OPCODE_CHECK_VAL_DISABLED 0x0
2172 #define OPCODE_CHECK_MASK_DISABLED 0x0
2173 
2174 static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
2175 {
2176 	struct hfi1_pportdata *ppd;
2177 	int i;
2178 
2179 	dd->z_int_counter = get_all_cpu_total(dd->int_counter);
2180 	dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
2181 	dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);
2182 
2183 	ppd = (struct hfi1_pportdata *)(dd + 1);
2184 	for (i = 0; i < dd->num_pports; i++, ppd++) {
2185 		ppd->ibport_data.rvp.z_rc_acks =
2186 			get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
2187 		ppd->ibport_data.rvp.z_rc_qacks =
2188 			get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
2189 	}
2190 }
2191 
2192 /* Control LED state */
2193 static inline void setextled(struct hfi1_devdata *dd, u32 on)
2194 {
2195 	if (on)
2196 		write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
2197 	else
2198 		write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
2199 }
2200 
2201 /* return the i2c resource given the target */
2202 static inline u32 i2c_target(u32 target)
2203 {
2204 	return target ? CR_I2C2 : CR_I2C1;
2205 }
2206 
2207 /* return the i2c chain chip resource that this HFI uses for QSFP */
2208 static inline u32 qsfp_resource(struct hfi1_devdata *dd)
2209 {
2210 	return i2c_target(dd->hfi1_id);
2211 }
2212 
2213 /* Is this device integrated or discrete? */
2214 static inline bool is_integrated(struct hfi1_devdata *dd)
2215 {
2216 	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
2217 }
2218 
2219 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
2220 
2221 #define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
2222 #define DD_DEV_ASSIGN(dd)      __assign_str(dev, dev_name(&(dd)->pcidev->dev))
2223 
2224 static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
2225 				       struct rdma_ah_attr *attr)
2226 {
2227 	struct hfi1_pportdata *ppd;
2228 	struct hfi1_ibport *ibp;
2229 	u32 dlid = rdma_ah_get_dlid(attr);
2230 
2231 	/*
2232 	 * Kernel clients may not have set up GRH information;
2233 	 * set that here.
2234 	 */
2235 	ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
2236 	ppd = ppd_from_ibp(ibp);
2237 	if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
2238 	      (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
2239 	    (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
2240 	    (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
2241 	    (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
2242 	    (rdma_ah_get_make_grd(attr))) {
2243 		rdma_ah_set_ah_flags(attr, IB_AH_GRH);
2244 		rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
2245 		rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
2246 	}
2247 }
2248 
2249 /*
2250  * hfi1_check_mcast - Check if the given lid is
2251  * in the OPA multicast range.
2252  *
2253  * The LID might either reside in ah.dlid or might be
2254  * in the GRH of the address handle as DGID if extended
2255  * addresses are in use.
2256  */
2257 static inline bool hfi1_check_mcast(u32 lid)
2258 {
2259 	return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
2260 		(lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
2261 }
2262 
2263 #define opa_get_lid(lid, format)	\
2264 	__opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)
2265 
2266 /* Convert a lid to a specific lid space */
2267 static inline u32 __opa_get_lid(u32 lid, u8 format)
2268 {
2269 	bool is_mcast = hfi1_check_mcast(lid);
2270 
2271 	switch (format) {
2272 	case OPA_PORT_PACKET_FORMAT_8B:
2273 	case OPA_PORT_PACKET_FORMAT_10B:
2274 		if (is_mcast)
2275 			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
2276 				0xF0000);
2277 		return lid & 0xFFFFF;
2278 	case OPA_PORT_PACKET_FORMAT_16B:
2279 		if (is_mcast)
2280 			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
2281 				0xF00000);
2282 		return lid & 0xFFFFFF;
2283 	case OPA_PORT_PACKET_FORMAT_9B:
2284 		if (is_mcast)
2285 			return (lid -
2286 				opa_get_mcast_base(OPA_MCAST_NR) +
2287 				be16_to_cpu(IB_MULTICAST_LID_BASE));
2288 		else
2289 			return lid & 0xFFFF;
2290 	default:
2291 		return lid;
2292 	}
2293 }
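
/*
 * Worked example (assuming MCAST_NR is 4, as in the comment in
 * hfi1_make_opa_lid() below, so the 32-bit multicast base is
 * 0xF0000000): the multicast LID 0xF0000010 converts to 0xF0010 in
 * 8B/10B space, 0xF00010 in 16B space, and 0xC010
 * (IB_MULTICAST_LID_BASE + 0x10) in 9B space.
 */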
2294 
2295 /* Return true if the given lid is in the OPA 16B multicast range */
2296 static inline bool hfi1_is_16B_mcast(u32 lid)
2297 {
2298 	return ((lid >=
2299 		opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
2300 		(lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
2301 }
2302 
2303 static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
2304 {
2305 	const struct ib_global_route *grh = rdma_ah_read_grh(attr);
2306 	u32 dlid = rdma_ah_get_dlid(attr);
2307 
2308 	/* Modify ah_attr.dlid to be in the 32-bit LID space.
2309 	 * This is how the address will be laid out:
2310 	 * Assuming MCAST_NR is 4,
2311 	 * 32 bit permissive LID = 0xFFFFFFFF
2312 	 * Multicast LID range = 0xFFFFFFFE to 0xF0000000
2313 	 * Unicast LID range = 0xEFFFFFFF to 1
2314 	 * Invalid LID = 0
2315 	 */
2316 	if (ib_is_opa_gid(&grh->dgid))
2317 		dlid = opa_get_lid_from_gid(&grh->dgid);
2318 	else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
2319 		 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
2320 		 (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
2321 		dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
2322 			opa_get_mcast_base(OPA_MCAST_NR);
2323 	else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
2324 		dlid = be32_to_cpu(OPA_LID_PERMISSIVE);
2325 
2326 	rdma_ah_set_dlid(attr, dlid);
2327 }
2328 
2329 static inline u8 hfi1_get_packet_type(u32 lid)
2330 {
2331 	/* 9B if lid >= 0xF0000000 (the 32-bit multicast base) */
2332 	if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
2333 		return HFI1_PKT_TYPE_9B;
2334 
2335 	/* 16B if lid >= 0xC000 (the multicast base in 9B LID space) */
2336 	if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
2337 		return HFI1_PKT_TYPE_16B;
2338 
2339 	return HFI1_PKT_TYPE_9B;
2340 }
2341 
2342 static inline u8 hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
2343 {
2344 	/*
2345 	 * If there was an incoming 16B packet with permissive
2346 	 * LIDs, OPA GIDs would have been programmed when those
2347 	 * packets were received. A 16B packet will have to
2348 	 * be sent in response to that packet. Return a 16B
2349 	 * header type if that's the case.
2350 	 */
2351 	if (rdma_ah_get_dlid(attr) == be32_to_cpu(OPA_LID_PERMISSIVE))
2352 		return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ?
2353 			HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;
2354 
2355 	/*
2356 	 * Return a 16B header type if either the destination
2357 	 * or source lid is extended.
2358 	 */
2359 	if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
2360 		return HFI1_PKT_TYPE_16B;
2361 
2362 	return hfi1_get_packet_type(lid);
2363 }
2364 
2365 static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
2366 				     struct ib_grh *grh, u32 slid,
2367 				     u32 dlid)
2368 {
2369 	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
2370 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2371 
2372 	if (!ibp)
2373 		return;
2374 
2375 	grh->hop_limit = 1;
2376 	grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
2377 	if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))
2378 		grh->sgid.global.interface_id =
2379 			OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE));
2380 	else
2381 		grh->sgid.global.interface_id = OPA_MAKE_ID(slid);
2382 
2383 	/*
2384 	 * Upper layers (like mad) may compare the dgid in the
2385 	 * wc that is obtained here with the sgid_index in
2386 	 * the wr. Since sgid_index in wr is always 0 for
2387 	 * extended lids, set the dgid here to the default
2388 	 * IB gid.
2389 	 */
2390 	grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix;
2391 	grh->dgid.global.interface_id =
2392 		cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
2393 }
2394 
2395 static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
2396 {
2397 	return -(hdr_size + payload + (SIZE_OF_CRC << 2) +
2398 		     SIZE_OF_LT) & 0x7;
2399 }
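
/*
 * Worked example (assuming SIZE_OF_CRC is one dword and SIZE_OF_LT is
 * one byte): a 40-byte header with a 10-byte payload totals
 * 40 + 10 + 4 + 1 = 55 bytes, so hfi1_get_16b_padding(40, 10) returns
 * 1 pad byte to reach the next 8-byte boundary at 56.
 */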
2400 
2401 static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
2402 				    u16 lrh0, u16 len,
2403 				    u16 dlid, u16 slid)
2404 {
2405 	hdr->lrh[0] = cpu_to_be16(lrh0);
2406 	hdr->lrh[1] = cpu_to_be16(dlid);
2407 	hdr->lrh[2] = cpu_to_be16(len);
2408 	hdr->lrh[3] = cpu_to_be16(slid);
2409 }
2410 
2411 static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
2412 				     u32 slid, u32 dlid,
2413 				     u16 len, u16 pkey,
2414 				     u8 becn, u8 fecn, u8 l4,
2415 				     u8 sc)
2416 {
2417 	u32 lrh0 = 0;
2418 	u32 lrh1 = 0x40000000;
2419 	u32 lrh2 = 0;
2420 	u32 lrh3 = 0;
2421 
2422 	lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT);
2423 	lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT);
2424 	lrh0 = (lrh0 & ~OPA_16B_LID_MASK)  | (slid & OPA_16B_LID_MASK);
2425 	lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT);
2426 	lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT);
2427 	lrh1 = (lrh1 & ~OPA_16B_LID_MASK) | (dlid & OPA_16B_LID_MASK);
2428 	lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) |
2429 		((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
2430 	lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
2431 		((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
2432 	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT);
2433 	lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
2434 
2435 	hdr->lrh[0] = lrh0;
2436 	hdr->lrh[1] = lrh1;
2437 	hdr->lrh[2] = lrh2;
2438 	hdr->lrh[3] = lrh3;
2439 }
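
/*
 * Worked example (illustrative; it assumes the in-header LID fields
 * are 20 bits wide, with the upper bits of a 24-bit 16B LID carried
 * separately, matching the packing above): for slid = 0x123456, the
 * low 20 bits (0x23456) are placed in lrh0 via OPA_16B_LID_MASK and
 * the high 4 bits (0x1) go into lrh2 via OPA_16B_SLID_HIGH_SHIFT;
 * the dlid is split the same way across lrh1 and lrh2.
 */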
2440 #endif                          /* _HFI1_KERNEL_H */
2441