xref: /openbmc/linux/drivers/infiniband/hw/hfi1/verbs.c (revision 711aab1d)
1 /*
2  * Copyright(c) 2015 - 2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <rdma/ib_mad.h>
49 #include <rdma/ib_user_verbs.h>
50 #include <linux/io.h>
51 #include <linux/module.h>
52 #include <linux/utsname.h>
53 #include <linux/rculist.h>
54 #include <linux/mm.h>
55 #include <linux/vmalloc.h>
56 #include <rdma/opa_addr.h>
57 
58 #include "hfi.h"
59 #include "common.h"
60 #include "device.h"
61 #include "trace.h"
62 #include "qp.h"
63 #include "verbs_txreq.h"
64 #include "debugfs.h"
65 #include "vnic.h"
66 
67 static unsigned int hfi1_lkey_table_size = 16;
68 module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
69 		   S_IRUGO);
70 MODULE_PARM_DESC(lkey_table_size,
71 		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
72 
73 static unsigned int hfi1_max_pds = 0xFFFF;
74 module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
75 MODULE_PARM_DESC(max_pds,
76 		 "Maximum number of protection domains to support");
77 
78 static unsigned int hfi1_max_ahs = 0xFFFF;
79 module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
80 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
81 
82 unsigned int hfi1_max_cqes = 0x2FFFFF;
83 module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
84 MODULE_PARM_DESC(max_cqes,
85 		 "Maximum number of completion queue entries to support");
86 
87 unsigned int hfi1_max_cqs = 0x1FFFF;
88 module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
89 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
90 
91 unsigned int hfi1_max_qp_wrs = 0x3FFF;
92 module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
93 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
94 
95 unsigned int hfi1_max_qps = 32768;
96 module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
97 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
98 
99 unsigned int hfi1_max_sges = 0x60;
100 module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
101 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
102 
103 unsigned int hfi1_max_mcast_grps = 16384;
104 module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
105 MODULE_PARM_DESC(max_mcast_grps,
106 		 "Maximum number of multicast groups to support");
107 
108 unsigned int hfi1_max_mcast_qp_attached = 16;
109 module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
110 		   uint, S_IRUGO);
111 MODULE_PARM_DESC(max_mcast_qp_attached,
112 		 "Maximum number of attached QPs to support");
113 
114 unsigned int hfi1_max_srqs = 1024;
115 module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
116 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
117 
118 unsigned int hfi1_max_srq_sges = 128;
119 module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
120 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
121 
122 unsigned int hfi1_max_srq_wrs = 0x1FFFF;
123 module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
124 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
125 
126 unsigned short piothreshold = 256;
127 module_param(piothreshold, ushort, S_IRUGO);
128 MODULE_PARM_DESC(piothreshold, "Size (in bytes) used to determine sdma vs. pio");
129 
130 #define COPY_CACHELESS 1
131 #define COPY_ADAPTIVE  2
132 static unsigned int sge_copy_mode;
133 module_param(sge_copy_mode, uint, S_IRUGO);
134 MODULE_PARM_DESC(sge_copy_mode,
135 		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
136 
137 static void verbs_sdma_complete(
138 	struct sdma_txreq *cookie,
139 	int status);
140 
141 static int pio_wait(struct rvt_qp *qp,
142 		    struct send_context *sc,
143 		    struct hfi1_pkt_state *ps,
144 		    u32 flag);
145 
146 /* Length of buffer to create verbs txreq cache name */
147 #define TXREQ_NAME_LEN 24
148 
149 static uint wss_threshold;
150 module_param(wss_threshold, uint, S_IRUGO);
151 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
152 static uint wss_clean_period = 256;
153 module_param(wss_clean_period, uint, S_IRUGO);
154 MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
155 
156 /* memory working set size */
157 struct hfi1_wss {
158 	unsigned long *entries;
159 	atomic_t total_count;
160 	atomic_t clean_counter;
161 	atomic_t clean_entry;
162 
163 	int threshold;
164 	int num_entries;
165 	long pages_mask;
166 };
167 
168 static struct hfi1_wss wss;
169 
170 int hfi1_wss_init(void)
171 {
172 	long llc_size;
173 	long llc_bits;
174 	long table_size;
175 	long table_bits;
176 
177 	/* check for a valid percent range - default to 80 if none or invalid */
178 	if (wss_threshold < 1 || wss_threshold > 100)
179 		wss_threshold = 80;
180 	/* reject a wildly large period */
181 	if (wss_clean_period > 1000000)
182 		wss_clean_period = 256;
183 	/* reject a zero period */
184 	if (wss_clean_period == 0)
185 		wss_clean_period = 1;
186 
187 	/*
188 	 * Calculate the table size - the LLC size rounded up to the next
189 	 * power of 2.  The LLC size is reported in KiB.
190 	 */
191 	llc_size = wss_llc_size() * 1024;
192 	table_size = roundup_pow_of_two(llc_size);
193 
194 	/* one bit per page in rounded up table */
195 	llc_bits = llc_size / PAGE_SIZE;
196 	table_bits = table_size / PAGE_SIZE;
197 	wss.pages_mask = table_bits - 1;
198 	wss.num_entries = table_bits / BITS_PER_LONG;
199 
200 	wss.threshold = (llc_bits * wss_threshold) / 100;
201 	if (wss.threshold == 0)
202 		wss.threshold = 1;
203 
204 	atomic_set(&wss.clean_counter, wss_clean_period);
205 
206 	wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
207 			      GFP_KERNEL);
208 	if (!wss.entries) {
209 		hfi1_wss_exit();
210 		return -ENOMEM;
211 	}
212 
213 	return 0;
214 }
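/*
 * Worked example for hfi1_wss_init() above (illustrative only, not from
 * the original source): on a 64-bit kernel with 4 KiB pages, a 32 MiB LLC
 * and the default wss_threshold of 80 gives llc_size = 33554432,
 * table_size = 33554432 (already a power of 2), llc_bits = table_bits =
 * 8192 pages, pages_mask = 8191, num_entries = 8192 / 64 = 128 longs,
 * and threshold = 8192 * 80 / 100 = 6553 pages.
 */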
215 
216 void hfi1_wss_exit(void)
217 {
218 	/* coded to handle partially initialized and repeat callers */
219 	kfree(wss.entries);
220 	wss.entries = NULL;
221 }
222 
223 /*
224  * Advance the clean counter.  When the clean period has expired,
225  * clean an entry.
226  *
227  * This is implemented in atomics to avoid locking.  Because multiple
228  * variables are involved, it can be racy, which can lead to slightly
229  * inaccurate information.  Since this is only a heuristic, this is
230  * OK.  Any inaccuracies will clean themselves out as the counter
231  * advances.  That said, it is unlikely the entry clean operation will
232  * race - the next possible racer will not start until the next clean
233  * period.
234  *
235  * The clean counter is implemented as a decrement to zero.  When zero
236  * is reached an entry is cleaned.
237  */
238 static void wss_advance_clean_counter(void)
239 {
240 	int entry;
241 	int weight;
242 	unsigned long bits;
243 
244 	/* become the cleaner if we decrement the counter to zero */
245 	if (atomic_dec_and_test(&wss.clean_counter)) {
246 		/*
247 		 * Set, not add, the clean period.  This avoids an issue
248 		 * where the counter could decrement below the clean period.
249 		 * Doing a set can result in lost decrements, slowing the
250 		 * clean advance.  Since this is a heuristic, this possible
251 		 * slowdown is OK.
252 		 *
253 		 * An alternative is to loop, advancing the counter by a
254 		 * clean period until the result is > 0. However, this could
255 		 * lead to several threads keeping another in the clean loop.
256 		 * This could be mitigated by limiting the number of times
257 		 * we stay in the loop.
258 		 */
259 		atomic_set(&wss.clean_counter, wss_clean_period);
260 
261 		/*
262 		 * Uniquely grab the entry to clean and move to next.
263 		 * The current entry is always the lower bits of
264 		 * wss.clean_entry.  The table size, wss.num_entries,
265 		 * is always a power-of-2.
266 		 */
267 		entry = (atomic_inc_return(&wss.clean_entry) - 1)
268 			& (wss.num_entries - 1);
269 
270 		/* clear the entry and count the bits */
271 		bits = xchg(&wss.entries[entry], 0);
272 		weight = hweight64((u64)bits);
273 		/* only adjust the contended total count if needed */
274 		if (weight)
275 			atomic_sub(weight, &wss.total_count);
276 	}
277 }
278 
279 /*
280  * Insert the given address into the working set array.
281  */
282 static void wss_insert(void *address)
283 {
284 	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
285 	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
286 	u32 nr = page & (BITS_PER_LONG - 1);
287 
288 	if (!test_and_set_bit(nr, &wss.entries[entry]))
289 		atomic_inc(&wss.total_count);
290 
291 	wss_advance_clean_counter();
292 }
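/*
 * Example for wss_insert() above (hypothetical address, assuming
 * PAGE_SHIFT == 12 and pages_mask == 8191): address 0xffff888012345000
 * maps to page (0xffff888012345 & 0x1fff) = 0x345 = 837, so
 * entry = 837 / 64 = 13 and nr = 837 % 64 = 5; bit 5 of wss.entries[13]
 * is set and, if it was clear, total_count is incremented.
 */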
293 
294 /*
295  * Is the working set larger than the threshold?
296  */
297 static inline bool wss_exceeds_threshold(void)
298 {
299 	return atomic_read(&wss.total_count) >= wss.threshold;
300 }
301 
302 /*
303  * Translate ib_wr_opcode into ib_wc_opcode.
304  */
305 const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
306 	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
307 	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
308 	[IB_WR_SEND] = IB_WC_SEND,
309 	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
310 	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
311 	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
312 	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
313 	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
314 	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
315 	[IB_WR_REG_MR] = IB_WC_REG_MR
316 };
317 
318 /*
319  * Length of header by opcode, 0 --> not supported
320  */
321 const u8 hdr_len_by_opcode[256] = {
322 	/* RC */
323 	[IB_OPCODE_RC_SEND_FIRST]                     = 12 + 8,
324 	[IB_OPCODE_RC_SEND_MIDDLE]                    = 12 + 8,
325 	[IB_OPCODE_RC_SEND_LAST]                      = 12 + 8,
326 	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
327 	[IB_OPCODE_RC_SEND_ONLY]                      = 12 + 8,
328 	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
329 	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
330 	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = 12 + 8,
331 	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = 12 + 8,
332 	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
333 	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
334 	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
335 	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = 12 + 8 + 16,
336 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = 12 + 8 + 4,
337 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = 12 + 8,
338 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = 12 + 8 + 4,
339 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = 12 + 8 + 4,
340 	[IB_OPCODE_RC_ACKNOWLEDGE]                    = 12 + 8 + 4,
341 	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = 12 + 8 + 4 + 8,
342 	[IB_OPCODE_RC_COMPARE_SWAP]                   = 12 + 8 + 28,
343 	[IB_OPCODE_RC_FETCH_ADD]                      = 12 + 8 + 28,
344 	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = 12 + 8 + 4,
345 	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = 12 + 8 + 4,
346 	/* UC */
347 	[IB_OPCODE_UC_SEND_FIRST]                     = 12 + 8,
348 	[IB_OPCODE_UC_SEND_MIDDLE]                    = 12 + 8,
349 	[IB_OPCODE_UC_SEND_LAST]                      = 12 + 8,
350 	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = 12 + 8 + 4,
351 	[IB_OPCODE_UC_SEND_ONLY]                      = 12 + 8,
352 	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 4,
353 	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = 12 + 8 + 16,
354 	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = 12 + 8,
355 	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = 12 + 8,
356 	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
357 	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = 12 + 8 + 16,
358 	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
359 	/* UD */
360 	[IB_OPCODE_UD_SEND_ONLY]                      = 12 + 8 + 8,
361 	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = 12 + 8 + 12
362 };
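/*
 * Note on the arithmetic above: 8 is the LRH, 12 is the BTH, and the
 * remaining bytes are the standard IBTA extension headers, e.g. 4 for
 * ImmDt/AETH/IETH, 8 for DETH or AtomicAckETH, 16 for RETH, 20 for
 * RETH + ImmDt, and 28 for AtomicETH.
 */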
363 
364 static const opcode_handler opcode_handler_tbl[256] = {
365 	/* RC */
366 	[IB_OPCODE_RC_SEND_FIRST]                     = &hfi1_rc_rcv,
367 	[IB_OPCODE_RC_SEND_MIDDLE]                    = &hfi1_rc_rcv,
368 	[IB_OPCODE_RC_SEND_LAST]                      = &hfi1_rc_rcv,
369 	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
370 	[IB_OPCODE_RC_SEND_ONLY]                      = &hfi1_rc_rcv,
371 	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_rc_rcv,
372 	[IB_OPCODE_RC_RDMA_WRITE_FIRST]               = &hfi1_rc_rcv,
373 	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE]              = &hfi1_rc_rcv,
374 	[IB_OPCODE_RC_RDMA_WRITE_LAST]                = &hfi1_rc_rcv,
375 	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
376 	[IB_OPCODE_RC_RDMA_WRITE_ONLY]                = &hfi1_rc_rcv,
377 	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
378 	[IB_OPCODE_RC_RDMA_READ_REQUEST]              = &hfi1_rc_rcv,
379 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]       = &hfi1_rc_rcv,
380 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE]      = &hfi1_rc_rcv,
381 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]        = &hfi1_rc_rcv,
382 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]        = &hfi1_rc_rcv,
383 	[IB_OPCODE_RC_ACKNOWLEDGE]                    = &hfi1_rc_rcv,
384 	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]             = &hfi1_rc_rcv,
385 	[IB_OPCODE_RC_COMPARE_SWAP]                   = &hfi1_rc_rcv,
386 	[IB_OPCODE_RC_FETCH_ADD]                      = &hfi1_rc_rcv,
387 	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]      = &hfi1_rc_rcv,
388 	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE]      = &hfi1_rc_rcv,
389 	/* UC */
390 	[IB_OPCODE_UC_SEND_FIRST]                     = &hfi1_uc_rcv,
391 	[IB_OPCODE_UC_SEND_MIDDLE]                    = &hfi1_uc_rcv,
392 	[IB_OPCODE_UC_SEND_LAST]                      = &hfi1_uc_rcv,
393 	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
394 	[IB_OPCODE_UC_SEND_ONLY]                      = &hfi1_uc_rcv,
395 	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_uc_rcv,
396 	[IB_OPCODE_UC_RDMA_WRITE_FIRST]               = &hfi1_uc_rcv,
397 	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE]              = &hfi1_uc_rcv,
398 	[IB_OPCODE_UC_RDMA_WRITE_LAST]                = &hfi1_uc_rcv,
399 	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
400 	[IB_OPCODE_UC_RDMA_WRITE_ONLY]                = &hfi1_uc_rcv,
401 	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
402 	/* UD */
403 	[IB_OPCODE_UD_SEND_ONLY]                      = &hfi1_ud_rcv,
404 	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE]       = &hfi1_ud_rcv,
405 	/* CNP */
406 	[IB_OPCODE_CNP]				      = &hfi1_cnp_rcv
407 };
408 
409 #define OPMASK 0x1f
410 
411 static const u32 pio_opmask[BIT(3)] = {
412 	/* RC */
413 	[IB_OPCODE_RC >> 5] =
414 		BIT(RC_OP(SEND_ONLY) & OPMASK) |
415 		BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
416 		BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
417 		BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
418 		BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
419 		BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
420 		BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
421 		BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
422 		BIT(RC_OP(FETCH_ADD) & OPMASK),
423 	/* UC */
424 	[IB_OPCODE_UC >> 5] =
425 		BIT(UC_OP(SEND_ONLY) & OPMASK) |
426 		BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
427 		BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
428 		BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
429 };
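/*
 * pio_opmask is indexed by the top 3 bits of the opcode (the transport
 * type); each word holds one bit per low-5-bit opcode value.  Only
 * single-packet requests and responses are marked as PIO candidates;
 * get_send_routine() consults this mask when choosing PIO vs. SDMA.
 */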
430 
431 /*
432  * System image GUID.
433  */
434 __be64 ib_hfi1_sys_image_guid;
435 
436 /**
437  * hfi1_copy_sge - copy data to SGE memory
438  * @ss: the SGE state
439  * @data: the data to copy
440  * @length: the length of the data
441  * @release: boolean to release MR
442  * @copy_last: do a separate copy of the last 8 bytes
443  */
444 void hfi1_copy_sge(
445 	struct rvt_sge_state *ss,
446 	void *data, u32 length,
447 	bool release,
448 	bool copy_last)
449 {
450 	struct rvt_sge *sge = &ss->sge;
451 	int i;
452 	bool in_last = false;
453 	bool cacheless_copy = false;
454 
455 	if (sge_copy_mode == COPY_CACHELESS) {
456 		cacheless_copy = length >= PAGE_SIZE;
457 	} else if (sge_copy_mode == COPY_ADAPTIVE) {
458 		if (length >= PAGE_SIZE) {
459 			/*
460 			 * NOTE: this *assumes*:
461 			 * o The first vaddr is the dest.
462 			 * o If multiple pages, then vaddr is sequential.
463 			 */
464 			wss_insert(sge->vaddr);
465 			if (length >= (2 * PAGE_SIZE))
466 				wss_insert(sge->vaddr + PAGE_SIZE);
467 
468 			cacheless_copy = wss_exceeds_threshold();
469 		} else {
470 			wss_advance_clean_counter();
471 		}
472 	}
473 	if (copy_last) {
474 		if (length > 8) {
475 			length -= 8;
476 		} else {
477 			copy_last = false;
478 			in_last = true;
479 		}
480 	}
481 
482 again:
483 	while (length) {
484 		u32 len = rvt_get_sge_length(sge, length);
485 
486 		WARN_ON_ONCE(len == 0);
487 		if (unlikely(in_last)) {
488 			/* enforce byte transfer ordering */
489 			for (i = 0; i < len; i++)
490 				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
491 		} else if (cacheless_copy) {
492 			cacheless_memcpy(sge->vaddr, data, len);
493 		} else {
494 			memcpy(sge->vaddr, data, len);
495 		}
496 		rvt_update_sge(ss, len, release);
497 		data += len;
498 		length -= len;
499 	}
500 
501 	if (copy_last) {
502 		copy_last = false;
503 		in_last = true;
504 		length = 8;
505 		goto again;
506 	}
507 }
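/*
 * Note on hfi1_copy_sge() above (editorial, not from the original
 * source): when copy_last is set and the payload is larger than 8 bytes,
 * the bulk of the data is copied first and the final 8 bytes are copied
 * byte-by-byte in a second pass (the "again" loop), so the last quadword
 * is written after the rest of the payload with its bytes stored in
 * order.
 */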
508 
509 /*
510  * Make sure the QP is ready and able to accept the given opcode.
511  */
512 static inline opcode_handler qp_ok(struct hfi1_packet *packet)
513 {
514 	if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
515 		return NULL;
516 	if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
517 	     packet->qp->allowed_ops) ||
518 	    (packet->opcode == IB_OPCODE_CNP))
519 		return opcode_handler_tbl[packet->opcode];
520 
521 	return NULL;
522 }
523 
524 static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
525 {
526 #ifdef CONFIG_FAULT_INJECTION
527 	if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP)
528 		/*
529 		 * In order to drop non-IB traffic we
530 		 * set PbcInsertHrc to NONE (0x2).
531 		 * The packet will still be delivered
532 		 * to the receiving node but a
533 		 * KHdrHCRCErr (KDETH packet with a bad
534 		 * HCRC) will be triggered and the
535 		 * packet will not be delivered to the
536 		 * correct context.
537 		 */
538 		pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
539 	else
540 		/*
541 		 * In order to drop regular verbs
542 		 * traffic we set the PbcTestEbp
543 		 * flag. The packet will still be
544 		 * delivered to the receiving node but
545 		 * a 'late ebp error' will be
546 		 * triggered and will be dropped.
547 		 */
548 		pbc |= PBC_TEST_EBP;
549 #endif
550 	return pbc;
551 }
552 
553 static int hfi1_do_pkey_check(struct hfi1_packet *packet)
554 {
555 	struct hfi1_ctxtdata *rcd = packet->rcd;
556 	struct hfi1_pportdata *ppd = rcd->ppd;
557 	struct hfi1_16b_header *hdr = packet->hdr;
558 	u16 pkey;
559 
560 	/* Pkey check needed only for bypass packets */
561 	if (packet->etype != RHF_RCV_TYPE_BYPASS)
562 		return 0;
563 
564 	/* Perform pkey check */
565 	pkey = hfi1_16B_get_pkey(hdr);
566 	return ingress_pkey_check(ppd, pkey, packet->sc,
567 				  packet->qp->s_pkey_index,
568 				  packet->slid, true);
569 }
570 
571 static inline void hfi1_handle_packet(struct hfi1_packet *packet,
572 				      bool is_mcast)
573 {
574 	u32 qp_num;
575 	struct hfi1_ctxtdata *rcd = packet->rcd;
576 	struct hfi1_pportdata *ppd = rcd->ppd;
577 	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
578 	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
579 	opcode_handler packet_handler;
580 	unsigned long flags;
581 
582 	inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]);
583 
584 	if (unlikely(is_mcast)) {
585 		struct rvt_mcast *mcast;
586 		struct rvt_mcast_qp *p;
587 
588 		if (!packet->grh)
589 			goto drop;
590 		mcast = rvt_mcast_find(&ibp->rvp,
591 				       &packet->grh->dgid,
592 				       opa_get_lid(packet->dlid, 9B));
593 		if (!mcast)
594 			goto drop;
595 		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
596 			packet->qp = p->qp;
597 			if (hfi1_do_pkey_check(packet))
598 				goto drop;
599 			spin_lock_irqsave(&packet->qp->r_lock, flags);
600 			packet_handler = qp_ok(packet);
601 			if (likely(packet_handler))
602 				packet_handler(packet);
603 			else
604 				ibp->rvp.n_pkt_drops++;
605 			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
606 		}
607 		/*
608 		 * Notify rvt_multicast_detach() if it is waiting for us
609 		 * to finish.
610 		 */
611 		if (atomic_dec_return(&mcast->refcount) <= 1)
612 			wake_up(&mcast->wait);
613 	} else {
614 		/* Get the destination QP number. */
615 		qp_num = ib_bth_get_qpn(packet->ohdr);
616 		rcu_read_lock();
617 		packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
618 		if (!packet->qp)
619 			goto unlock_drop;
620 
621 		if (hfi1_do_pkey_check(packet))
622 			goto unlock_drop;
623 
624 		if (unlikely(hfi1_dbg_fault_opcode(packet->qp, packet->opcode,
625 						   true)))
626 			goto unlock_drop;
627 
628 		spin_lock_irqsave(&packet->qp->r_lock, flags);
629 		packet_handler = qp_ok(packet);
630 		if (likely(packet_handler))
631 			packet_handler(packet);
632 		else
633 			ibp->rvp.n_pkt_drops++;
634 		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
635 		rcu_read_unlock();
636 	}
637 	return;
638 unlock_drop:
639 	rcu_read_unlock();
640 drop:
641 	ibp->rvp.n_pkt_drops++;
642 }
643 
644 /**
645  * hfi1_ib_rcv - process an incoming packet
646  * @packet: data packet information
647  *
648  * This is called to process an incoming packet at interrupt level.
649  */
650 void hfi1_ib_rcv(struct hfi1_packet *packet)
651 {
652 	struct hfi1_ctxtdata *rcd = packet->rcd;
653 
654 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
655 	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
656 }
657 
658 void hfi1_16B_rcv(struct hfi1_packet *packet)
659 {
660 	struct hfi1_ctxtdata *rcd = packet->rcd;
661 
662 	trace_input_ibhdr(rcd->dd, packet, false);
663 	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
664 }
665 
666 /*
667  * This is called from a timer to check for QPs
668  * which need kernel memory in order to send a packet.
669  */
670 static void mem_timer(unsigned long data)
671 {
672 	struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
673 	struct list_head *list = &dev->memwait;
674 	struct rvt_qp *qp = NULL;
675 	struct iowait *wait;
676 	unsigned long flags;
677 	struct hfi1_qp_priv *priv;
678 
679 	write_seqlock_irqsave(&dev->iowait_lock, flags);
680 	if (!list_empty(list)) {
681 		wait = list_first_entry(list, struct iowait, list);
682 		qp = iowait_to_qp(wait);
683 		priv = qp->priv;
684 		list_del_init(&priv->s_iowait.list);
685 		priv->s_iowait.lock = NULL;
686 		/* refcount held until actual wake up */
687 		if (!list_empty(list))
688 			mod_timer(&dev->mem_timer, jiffies + 1);
689 	}
690 	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
691 
692 	if (qp)
693 		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
694 }
695 
696 /*
697  * This is called with progress side lock held.
698  */
699 /* New API */
700 static void verbs_sdma_complete(
701 	struct sdma_txreq *cookie,
702 	int status)
703 {
704 	struct verbs_txreq *tx =
705 		container_of(cookie, struct verbs_txreq, txreq);
706 	struct rvt_qp *qp = tx->qp;
707 
708 	spin_lock(&qp->s_lock);
709 	if (tx->wqe) {
710 		hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
711 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
712 		struct hfi1_opa_header *hdr;
713 
714 		hdr = &tx->phdr.hdr;
715 		hfi1_rc_send_complete(qp, hdr);
716 	}
717 	spin_unlock(&qp->s_lock);
718 
719 	hfi1_put_txreq(tx);
720 }
721 
722 static int wait_kmem(struct hfi1_ibdev *dev,
723 		     struct rvt_qp *qp,
724 		     struct hfi1_pkt_state *ps)
725 {
726 	struct hfi1_qp_priv *priv = qp->priv;
727 	unsigned long flags;
728 	int ret = 0;
729 
730 	spin_lock_irqsave(&qp->s_lock, flags);
731 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
732 		write_seqlock(&dev->iowait_lock);
733 		list_add_tail(&ps->s_txreq->txreq.list,
734 			      &priv->s_iowait.tx_head);
735 		if (list_empty(&priv->s_iowait.list)) {
736 			if (list_empty(&dev->memwait))
737 				mod_timer(&dev->mem_timer, jiffies + 1);
738 			qp->s_flags |= RVT_S_WAIT_KMEM;
739 			list_add_tail(&priv->s_iowait.list, &dev->memwait);
740 			priv->s_iowait.lock = &dev->iowait_lock;
741 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
742 			rvt_get_qp(qp);
743 		}
744 		write_sequnlock(&dev->iowait_lock);
745 		qp->s_flags &= ~RVT_S_BUSY;
746 		ret = -EBUSY;
747 	}
748 	spin_unlock_irqrestore(&qp->s_lock, flags);
749 
750 	return ret;
751 }
752 
753 /*
754  * This routine calls txadds for each sg entry.
755  *
756  * Add failures will revert the sge cursor
757  */
758 static noinline int build_verbs_ulp_payload(
759 	struct sdma_engine *sde,
760 	u32 length,
761 	struct verbs_txreq *tx)
762 {
763 	struct rvt_sge_state *ss = tx->ss;
764 	struct rvt_sge *sg_list = ss->sg_list;
765 	struct rvt_sge sge = ss->sge;
766 	u8 num_sge = ss->num_sge;
767 	u32 len;
768 	int ret = 0;
769 
770 	while (length) {
771 		len = ss->sge.length;
772 		if (len > length)
773 			len = length;
774 		if (len > ss->sge.sge_length)
775 			len = ss->sge.sge_length;
776 		WARN_ON_ONCE(len == 0);
777 		ret = sdma_txadd_kvaddr(
778 			sde->dd,
779 			&tx->txreq,
780 			ss->sge.vaddr,
781 			len);
782 		if (ret)
783 			goto bail_txadd;
784 		rvt_update_sge(ss, len, false);
785 		length -= len;
786 	}
787 	return ret;
788 bail_txadd:
789 	/* unwind cursor */
790 	ss->sge = sge;
791 	ss->num_sge = num_sge;
792 	ss->sg_list = sg_list;
793 	return ret;
794 }
795 
796 /*
797  * Build the number of DMA descriptors needed to send length bytes of data.
798  *
799  * NOTE: DMA mapping is held in the tx until completed in the ring or
800  *       the tx desc is freed without having been submitted to the ring.
801  *
802  * This routine checks that each helper routine call succeeds.
803  */
804 /* New API */
805 static int build_verbs_tx_desc(
806 	struct sdma_engine *sde,
807 	u32 length,
808 	struct verbs_txreq *tx,
809 	struct hfi1_ahg_info *ahg_info,
810 	u64 pbc)
811 {
812 	int ret = 0;
813 	struct hfi1_sdma_header *phdr = &tx->phdr;
814 	u16 hdrbytes = tx->hdr_dwords << 2;
815 	u32 *hdr;
816 	u8 extra_bytes = 0;
817 	static char trail_buf[12]; /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
818 
819 	if (tx->phdr.hdr.hdr_type) {
820 		/*
821 		 * hdrbytes accounts for PBC. Need to subtract 8 bytes
822 		 * before calculating padding.
823 		 */
824 		extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
825 			      (SIZE_OF_CRC << 2) + SIZE_OF_LT;
826 		hdr = (u32 *)&phdr->hdr.opah;
827 	} else {
828 		hdr = (u32 *)&phdr->hdr.ibh;
829 	}
830 	if (!ahg_info->ahgcount) {
831 		ret = sdma_txinit_ahg(
832 			&tx->txreq,
833 			ahg_info->tx_flags,
834 			hdrbytes + length +
835 			extra_bytes,
836 			ahg_info->ahgidx,
837 			0,
838 			NULL,
839 			0,
840 			verbs_sdma_complete);
841 		if (ret)
842 			goto bail_txadd;
843 		phdr->pbc = cpu_to_le64(pbc);
844 		ret = sdma_txadd_kvaddr(
845 			sde->dd,
846 			&tx->txreq,
847 			phdr,
848 			hdrbytes);
849 		if (ret)
850 			goto bail_txadd;
851 	} else {
852 		ret = sdma_txinit_ahg(
853 			&tx->txreq,
854 			ahg_info->tx_flags,
855 			length,
856 			ahg_info->ahgidx,
857 			ahg_info->ahgcount,
858 			ahg_info->ahgdesc,
859 			hdrbytes,
860 			verbs_sdma_complete);
861 		if (ret)
862 			goto bail_txadd;
863 	}
864 	/* add the ulp payload - if any. tx->ss can be NULL for acks */
865 	if (tx->ss) {
866 		ret = build_verbs_ulp_payload(sde, length, tx);
867 		if (ret)
868 			goto bail_txadd;
869 	}
870 
871 	/* add icrc, lt byte, and padding to flit */
872 	if (extra_bytes != 0)
873 		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
874 					trail_buf, extra_bytes);
875 
876 bail_txadd:
877 	return ret;
878 }
879 
880 int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
881 			u64 pbc)
882 {
883 	struct hfi1_qp_priv *priv = qp->priv;
884 	struct hfi1_ahg_info *ahg_info = priv->s_ahg;
885 	u32 hdrwords = qp->s_hdrwords;
886 	u32 len = ps->s_txreq->s_cur_size;
887 	u32 plen;
888 	struct hfi1_ibdev *dev = ps->dev;
889 	struct hfi1_pportdata *ppd = ps->ppd;
890 	struct verbs_txreq *tx;
891 	u8 sc5 = priv->s_sc;
892 	int ret;
893 	u32 dwords;
894 	bool bypass = false;
895 
896 	if (ps->s_txreq->phdr.hdr.hdr_type) {
897 		u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
898 
899 		dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
900 			  SIZE_OF_LT) >> 2;
901 		bypass = true;
902 	} else {
903 		dwords = (len + 3) >> 2;
904 	}
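	/* the extra 2 dwords in plen below account for the 8-byte PBC */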
905 	plen = hdrwords + dwords + 2;
906 
907 	tx = ps->s_txreq;
908 	if (!sdma_txreq_built(&tx->txreq)) {
909 		if (likely(pbc == 0)) {
910 			u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
911 
912 			/* No vl15 here */
913 			/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
914 			if (ps->s_txreq->phdr.hdr.hdr_type)
915 				pbc |= PBC_PACKET_BYPASS |
916 				       PBC_INSERT_BYPASS_ICRC;
917 			else
918 				pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
919 
920 			if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode,
921 							   false)))
922 				pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
923 			pbc = create_pbc(ppd,
924 					 pbc,
925 					 qp->srate_mbps,
926 					 vl,
927 					 plen);
928 		}
929 		tx->wqe = qp->s_wqe;
930 		ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
931 		if (unlikely(ret))
932 			goto bail_build;
933 	}
934 	ret =  sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq,
935 			       ps->pkts_sent);
936 	if (unlikely(ret < 0)) {
937 		if (ret == -ECOMM)
938 			goto bail_ecomm;
939 		return ret;
940 	}
941 	trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
942 				&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
943 	return ret;
944 
945 bail_ecomm:
946 	/* The current one got "sent" */
947 	return 0;
948 bail_build:
949 	ret = wait_kmem(dev, qp, ps);
950 	if (!ret) {
951 		/* free txreq - bad state */
952 		hfi1_put_txreq(ps->s_txreq);
953 		ps->s_txreq = NULL;
954 	}
955 	return ret;
956 }
957 
958 /*
959  * If we are now in the error state, return zero to flush the
960  * send work request.
961  */
962 static int pio_wait(struct rvt_qp *qp,
963 		    struct send_context *sc,
964 		    struct hfi1_pkt_state *ps,
965 		    u32 flag)
966 {
967 	struct hfi1_qp_priv *priv = qp->priv;
968 	struct hfi1_devdata *dd = sc->dd;
969 	struct hfi1_ibdev *dev = &dd->verbs_dev;
970 	unsigned long flags;
971 	int ret = 0;
972 
973 	/*
974 	 * Note that as soon as want_buffer() is called and
975 	 * possibly before it returns, sc_piobufavail()
976 	 * could be called. Therefore, put QP on the I/O wait list before
977 	 * enabling the PIO avail interrupt.
978 	 */
979 	spin_lock_irqsave(&qp->s_lock, flags);
980 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
981 		write_seqlock(&dev->iowait_lock);
982 		list_add_tail(&ps->s_txreq->txreq.list,
983 			      &priv->s_iowait.tx_head);
984 		if (list_empty(&priv->s_iowait.list)) {
985 			struct hfi1_ibdev *dev = &dd->verbs_dev;
986 			int was_empty;
987 
988 			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
989 			dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
990 			qp->s_flags |= flag;
991 			was_empty = list_empty(&sc->piowait);
992 			iowait_queue(ps->pkts_sent, &priv->s_iowait,
993 				     &sc->piowait);
994 			priv->s_iowait.lock = &dev->iowait_lock;
995 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
996 			rvt_get_qp(qp);
997 			/* counting: only call wantpiobuf_intr if first user */
998 			if (was_empty)
999 				hfi1_sc_wantpiobuf_intr(sc, 1);
1000 		}
1001 		write_sequnlock(&dev->iowait_lock);
1002 		qp->s_flags &= ~RVT_S_BUSY;
1003 		ret = -EBUSY;
1004 	}
1005 	spin_unlock_irqrestore(&qp->s_lock, flags);
1006 	return ret;
1007 }
1008 
1009 static void verbs_pio_complete(void *arg, int code)
1010 {
1011 	struct rvt_qp *qp = (struct rvt_qp *)arg;
1012 	struct hfi1_qp_priv *priv = qp->priv;
1013 
1014 	if (iowait_pio_dec(&priv->s_iowait))
1015 		iowait_drain_wakeup(&priv->s_iowait);
1016 }
1017 
1018 int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1019 			u64 pbc)
1020 {
1021 	struct hfi1_qp_priv *priv = qp->priv;
1022 	u32 hdrwords = qp->s_hdrwords;
1023 	struct rvt_sge_state *ss = ps->s_txreq->ss;
1024 	u32 len = ps->s_txreq->s_cur_size;
1025 	u32 dwords;
1026 	u32 plen;
1027 	struct hfi1_pportdata *ppd = ps->ppd;
1028 	u32 *hdr;
1029 	u8 sc5;
1030 	unsigned long flags = 0;
1031 	struct send_context *sc;
1032 	struct pio_buf *pbuf;
1033 	int wc_status = IB_WC_SUCCESS;
1034 	int ret = 0;
1035 	pio_release_cb cb = NULL;
1036 	u32 lrh0_16b;
1037 	bool bypass = false;
1038 	u8 extra_bytes = 0;
1039 
1040 	if (ps->s_txreq->phdr.hdr.hdr_type) {
1041 		u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len);
1042 
1043 		extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
1044 		dwords = (len + extra_bytes) >> 2;
1045 		hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
1046 		lrh0_16b = ps->s_txreq->phdr.hdr.opah.lrh[0];
1047 		bypass = true;
1048 	} else {
1049 		dwords = (len + 3) >> 2;
1050 		hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
1051 	}
1052 	plen = hdrwords + dwords + 2;
1053 
1054 	/* only RC/UC use complete */
1055 	switch (qp->ibqp.qp_type) {
1056 	case IB_QPT_RC:
1057 	case IB_QPT_UC:
1058 		cb = verbs_pio_complete;
1059 		break;
1060 	default:
1061 		break;
1062 	}
1063 
1064 	/* vl15 special case taken care of in ud.c */
1065 	sc5 = priv->s_sc;
1066 	sc = ps->s_txreq->psc;
1067 
1068 	if (likely(pbc == 0)) {
1069 		u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
1070 
1071 		/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
1072 		if (ps->s_txreq->phdr.hdr.hdr_type)
1073 			pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
1074 		else
1075 			pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
1076 		if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode, false)))
1077 			pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
1078 		pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
1079 	}
1080 	if (cb)
1081 		iowait_pio_inc(&priv->s_iowait);
1082 	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
1083 	if (unlikely(!pbuf)) {
1084 		if (cb)
1085 			verbs_pio_complete(qp, 0);
1086 		if (ppd->host_link_state != HLS_UP_ACTIVE) {
1087 			/*
1088 			 * If we have filled the PIO buffers to capacity and are
1089 			 * not in an active state, this request is not going to
1090 			 * go out, so just complete it with an error; otherwise a
1091 			 * ULP or the core may be stuck waiting.
1092 			 */
1093 			hfi1_cdbg(
1094 				PIO,
1095 				"alloc failed. state not active, completing");
1096 			wc_status = IB_WC_GENERAL_ERR;
1097 			goto pio_bail;
1098 		} else {
1099 			/*
1100 			 * This is a normal occurrence. The PIO buffers are full,
1101 			 * but we are still happily sending, so let's continue to
1102 			 * queue the request.
1103 			 */
1104 			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
1105 			ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
1106 			if (!ret)
1107 				/* txreq not queued - free */
1108 				goto bail;
1109 			/* tx consumed in wait */
1110 			return ret;
1111 		}
1112 	}
1113 
1114 	if (dwords == 0) {
1115 		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
1116 	} else {
1117 		seg_pio_copy_start(pbuf, pbc,
1118 				   hdr, hdrwords * 4);
1119 		if (ss) {
1120 			while (len) {
1121 				void *addr = ss->sge.vaddr;
1122 				u32 slen = ss->sge.length;
1123 
1124 				if (slen > len)
1125 					slen = len;
1126 				rvt_update_sge(ss, slen, false);
1127 				seg_pio_copy_mid(pbuf, addr, slen);
1128 				len -= slen;
1129 			}
1130 		}
1131 		/*
1132 		 * A bypass packet needs additional bytes copied to
1133 		 * accommodate the CRC and LT bytes.
1134 		 */
1135 		if (extra_bytes) {
1136 			u8 *empty_buf;
1137 
1138 			empty_buf = kcalloc(extra_bytes, sizeof(u8),
1139 					    GFP_KERNEL);
1140 			seg_pio_copy_mid(pbuf, empty_buf, extra_bytes);
1141 			kfree(empty_buf);
1142 		}
1143 		seg_pio_copy_end(pbuf);
1144 	}
1145 
1146 	trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
1147 			       &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
1148 
1149 pio_bail:
1150 	if (qp->s_wqe) {
1151 		spin_lock_irqsave(&qp->s_lock, flags);
1152 		hfi1_send_complete(qp, qp->s_wqe, wc_status);
1153 		spin_unlock_irqrestore(&qp->s_lock, flags);
1154 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
1155 		spin_lock_irqsave(&qp->s_lock, flags);
1156 		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
1157 		spin_unlock_irqrestore(&qp->s_lock, flags);
1158 	}
1159 
1160 	ret = 0;
1161 
1162 bail:
1163 	hfi1_put_txreq(ps->s_txreq);
1164 	return ret;
1165 }
1166 
1167 /*
1168  * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
1169  * being an entry from the partition key table), return 0
1170  * otherwise. Use the matching criteria for egress partition keys
1171  * specified in the OPAv1 spec., section 9.1l.7.
1172  */
1173 static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
1174 {
1175 	u16 mkey = pkey & PKEY_LOW_15_MASK;
1176 	u16 mentry = ent & PKEY_LOW_15_MASK;
1177 
1178 	if (mkey == mentry) {
1179 		/*
1180 		 * If pkey[15] is set (full partition member),
1181 		 * is bit 15 in the corresponding table element
1182 		 * clear (limited member)?
1183 		 */
1184 		if (pkey & PKEY_MEMBER_MASK)
1185 			return !!(ent & PKEY_MEMBER_MASK);
1186 		return 1;
1187 	}
1188 	return 0;
1189 }
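/*
 * For example: a limited-member pkey such as 0x0001 matches table entries
 * 0x0001 and 0x8001, while a full-member pkey such as 0x8001 matches only
 * a full-member entry (0x8001).
 */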
1190 
1191 /**
1192  * egress_pkey_check - check P_KEY of a packet
1193  * @ppd:  Physical IB port data
1194  * @slid: SLID for packet
1195  * @pkey: PKEY for header
1196  * @sc5:  SC for packet
1197  * @s_pkey_index: Used as a lookup optimization for kernel contexts
1198  * only. A negative value means a user context is calling this
1199  * function.
1200  *
1201  * Checks whether the header's pkey is valid.
1202  *
1203  * Return: 0 on success, otherwise, 1
1204  */
1205 int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
1206 		      u8 sc5, int8_t s_pkey_index)
1207 {
1208 	struct hfi1_devdata *dd;
1209 	int i;
1210 	int is_user_ctxt_mechanism = (s_pkey_index < 0);
1211 
1212 	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
1213 		return 0;
1214 
1215 	/* If SC15, pkey[0:14] must be 0x7fff */
1216 	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1217 		goto bad;
1218 
1219 	/* Is the pkey = 0x0, or 0x8000? */
1220 	if ((pkey & PKEY_LOW_15_MASK) == 0)
1221 		goto bad;
1222 
1223 	/*
1224 	 * For the kernel contexts only, if a qp is passed into the function,
1225 	 * the most likely matching pkey has index qp->s_pkey_index
1226 	 */
1227 	if (!is_user_ctxt_mechanism &&
1228 	    egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
1229 		return 0;
1230 	}
1231 
1232 	for (i = 0; i < MAX_PKEY_VALUES; i++) {
1233 		if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
1234 			return 0;
1235 	}
1236 bad:
1237 	/*
1238 	 * For the user-context mechanism, the P_KEY check would only happen
1239 	 * once per SDMA request, not once per packet.  Therefore, there's no
1240 	 * need to increment the counter for the user-context mechanism.
1241 	 */
1242 	if (!is_user_ctxt_mechanism) {
1243 		incr_cntr64(&ppd->port_xmit_constraint_errors);
1244 		dd = ppd->dd;
1245 		if (!(dd->err_info_xmit_constraint.status &
1246 		      OPA_EI_STATUS_SMASK)) {
1247 			dd->err_info_xmit_constraint.status |=
1248 				OPA_EI_STATUS_SMASK;
1249 			dd->err_info_xmit_constraint.slid = slid;
1250 			dd->err_info_xmit_constraint.pkey = pkey;
1251 		}
1252 	}
1253 	return 1;
1254 }
1255 
1256 /**
1257  * get_send_routine - choose an egress routine
1258  *
1259  * Choose an egress routine based on QP type
1260  * and size
1261  */
1262 static inline send_routine get_send_routine(struct rvt_qp *qp,
1263 					    struct hfi1_pkt_state *ps)
1264 {
1265 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1266 	struct hfi1_qp_priv *priv = qp->priv;
1267 	struct verbs_txreq *tx = ps->s_txreq;
1268 
1269 	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
1270 		return dd->process_pio_send;
1271 	switch (qp->ibqp.qp_type) {
1272 	case IB_QPT_SMI:
1273 		return dd->process_pio_send;
1274 	case IB_QPT_GSI:
1275 	case IB_QPT_UD:
1276 		break;
1277 	case IB_QPT_UC:
1278 	case IB_QPT_RC: {
1279 		if (piothreshold &&
1280 		    tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
1281 		    (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
1282 		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
1283 		    !sdma_txreq_built(&tx->txreq))
1284 			return dd->process_pio_send;
1285 		break;
1286 	}
1287 	default:
1288 		break;
1289 	}
1290 	return dd->process_dma_send;
1291 }
1292 
1293 /**
1294  * hfi1_verbs_send - send a packet
1295  * @qp: the QP to send on
1296  * @ps: the state of the packet to send
1297  *
1298  * Return zero if packet is sent or queued OK.
1299  * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
1300  */
1301 int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
1302 {
1303 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1304 	struct hfi1_qp_priv *priv = qp->priv;
1305 	struct ib_other_headers *ohdr;
1306 	send_routine sr;
1307 	int ret;
1308 	u16 pkey;
1309 	u32 slid;
1310 
1311 	/* locate the pkey within the headers */
1312 	if (ps->s_txreq->phdr.hdr.hdr_type) {
1313 		struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
1314 		u8 l4 = hfi1_16B_get_l4(hdr);
1315 
1316 		if (l4 == OPA_16B_L4_IB_GLOBAL)
1317 			ohdr = &hdr->u.l.oth;
1318 		else
1319 			ohdr = &hdr->u.oth;
1320 		slid = hfi1_16B_get_slid(hdr);
1321 		pkey = hfi1_16B_get_pkey(hdr);
1322 	} else {
1323 		struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh;
1324 		u8 lnh = ib_get_lnh(hdr);
1325 
1326 		if (lnh == HFI1_LRH_GRH)
1327 			ohdr = &hdr->u.l.oth;
1328 		else
1329 			ohdr = &hdr->u.oth;
1330 		slid = ib_get_slid(hdr);
1331 		pkey = ib_bth_get_pkey(ohdr);
1332 	}
1333 
1334 	ps->opcode = ib_bth_get_opcode(ohdr);
1335 	sr = get_send_routine(qp, ps);
1336 	ret = egress_pkey_check(dd->pport, slid, pkey,
1337 				priv->s_sc, qp->s_pkey_index);
1338 	if (unlikely(ret)) {
1339 		/*
1340 		 * The value we are returning here does not get propagated to
1341 		 * the verbs caller. Thus we need to complete the request with
1342 		 * error otherwise the caller could be sitting waiting on the
1343 		 * completion event. Only do this for PIO. SDMA has its own
1344 		 * mechanism for handling the errors. So for SDMA we can just
1345 		 * return.
1346 		 */
1347 		if (sr == dd->process_pio_send) {
1348 			unsigned long flags;
1349 
1350 			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
1351 				  __func__);
1352 			spin_lock_irqsave(&qp->s_lock, flags);
1353 			hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
1354 			spin_unlock_irqrestore(&qp->s_lock, flags);
1355 		}
1356 		return -EINVAL;
1357 	}
1358 	if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
1359 		return pio_wait(qp,
1360 				ps->s_txreq->psc,
1361 				ps,
1362 				RVT_S_WAIT_PIO_DRAIN);
1363 	return sr(qp, ps, 0);
1364 }
1365 
1366 /**
1367  * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
1368  * @dd: the device data structure
1369  */
1370 static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
1371 {
1372 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
1373 	u32 ver = dd->dc8051_ver;
1374 
1375 	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
1376 
1377 	rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) |
1378 		((u64)(dc8051_ver_min(ver)) << 16) |
1379 		(u64)dc8051_ver_patch(ver);
1380 
1381 	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1382 			IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1383 			IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1384 			IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
1385 			IB_DEVICE_MEM_MGT_EXTENSIONS |
1386 			IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
1387 	rdi->dparms.props.page_size_cap = PAGE_SIZE;
1388 	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
1389 	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
1390 	rdi->dparms.props.hw_ver = dd->minrev;
1391 	rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
1392 	rdi->dparms.props.max_mr_size = U64_MAX;
1393 	rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
1394 	rdi->dparms.props.max_qp = hfi1_max_qps;
1395 	rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
1396 	rdi->dparms.props.max_sge = hfi1_max_sges;
1397 	rdi->dparms.props.max_sge_rd = hfi1_max_sges;
1398 	rdi->dparms.props.max_cq = hfi1_max_cqs;
1399 	rdi->dparms.props.max_ah = hfi1_max_ahs;
1400 	rdi->dparms.props.max_cqe = hfi1_max_cqes;
1401 	rdi->dparms.props.max_mr = rdi->lkey_table.max;
1402 	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1403 	rdi->dparms.props.max_map_per_fmr = 32767;
1404 	rdi->dparms.props.max_pd = hfi1_max_pds;
1405 	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
1406 	rdi->dparms.props.max_qp_init_rd_atom = 255;
1407 	rdi->dparms.props.max_srq = hfi1_max_srqs;
1408 	rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
1409 	rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
1410 	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1411 	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
1412 	rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
1413 	rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
1414 	rdi->dparms.props.max_total_mcast_qp_attach =
1415 					rdi->dparms.props.max_mcast_qp_attach *
1416 					rdi->dparms.props.max_mcast_grp;
1417 }
1418 
1419 static inline u16 opa_speed_to_ib(u16 in)
1420 {
1421 	u16 out = 0;
1422 
1423 	if (in & OPA_LINK_SPEED_25G)
1424 		out |= IB_SPEED_EDR;
1425 	if (in & OPA_LINK_SPEED_12_5G)
1426 		out |= IB_SPEED_FDR;
1427 
1428 	return out;
1429 }
1430 
1431 /*
1432  * Convert a single OPA link width (no multiple flags) to an IB value.
1433  * A zero OPA link width means link down, which means the IB width value
1434  * is a don't care.
1435  */
1436 static inline u16 opa_width_to_ib(u16 in)
1437 {
1438 	switch (in) {
1439 	case OPA_LINK_WIDTH_1X:
1440 	/* map 2x and 3x to 1x as they don't exist in IB */
1441 	case OPA_LINK_WIDTH_2X:
1442 	case OPA_LINK_WIDTH_3X:
1443 		return IB_WIDTH_1X;
1444 	default: /* link down or unknown, return our largest width */
1445 	case OPA_LINK_WIDTH_4X:
1446 		return IB_WIDTH_4X;
1447 	}
1448 }
1449 
1450 static int query_port(struct rvt_dev_info *rdi, u8 port_num,
1451 		      struct ib_port_attr *props)
1452 {
1453 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
1454 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
1455 	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
1456 	u32 lid = ppd->lid;
1457 
1458 	/* props being zeroed by the caller, avoid zeroing it here */
1459 	props->lid = lid ? lid : 0;
1460 	props->lmc = ppd->lmc;
1461 	/* OPA logical states match IB logical states */
1462 	props->state = driver_lstate(ppd);
1463 	props->phys_state = driver_pstate(ppd);
1464 	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
1465 	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
1466 	/* see rate_show() in ib core/sysfs.c */
1467 	props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
1468 	props->max_vl_num = ppd->vls_supported;
1469 
1470 	/* Once we are a "first class" citizen and have added the OPA MTUs to
1471 	 * the core, we can advertise the larger MTU enum to the ULPs; for now
1472 	 * advertise only 4K.
1473 	 *
1474 	 * Those applications which are either OPA aware or pass the MTU enum
1475 	 * from the Path Records to us will get the new 8k MTU.  Those that
1476 	 * attempt to process the MTU enum may fail in various ways.
1477 	 */
1478 	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
1479 				      4096 : hfi1_max_mtu), IB_MTU_4096);
1480 	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
1481 		mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
1482 
1483 	/*
1484 	 * sm_lid of 0xFFFF needs special handling so that it can
1485 	 * be differentiated from a permissive LID of 0xFFFF.
1486 	 * We set the grh_required flag here so the SA can program
1487 	 * the DGID in the address handle appropriately.
1488 	 */
1489 	if (props->sm_lid == be16_to_cpu(IB_LID_PERMISSIVE))
1490 		props->grh_required = true;
1491 
1492 	return 0;
1493 }
1494 
1495 static int modify_device(struct ib_device *device,
1496 			 int device_modify_mask,
1497 			 struct ib_device_modify *device_modify)
1498 {
1499 	struct hfi1_devdata *dd = dd_from_ibdev(device);
1500 	unsigned i;
1501 	int ret;
1502 
1503 	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1504 				   IB_DEVICE_MODIFY_NODE_DESC)) {
1505 		ret = -EOPNOTSUPP;
1506 		goto bail;
1507 	}
1508 
1509 	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1510 		memcpy(device->node_desc, device_modify->node_desc,
1511 		       IB_DEVICE_NODE_DESC_MAX);
1512 		for (i = 0; i < dd->num_pports; i++) {
1513 			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1514 
1515 			hfi1_node_desc_chg(ibp);
1516 		}
1517 	}
1518 
1519 	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1520 		ib_hfi1_sys_image_guid =
1521 			cpu_to_be64(device_modify->sys_image_guid);
1522 		for (i = 0; i < dd->num_pports; i++) {
1523 			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1524 
1525 			hfi1_sys_guid_chg(ibp);
1526 		}
1527 	}
1528 
1529 	ret = 0;
1530 
1531 bail:
1532 	return ret;
1533 }
1534 
1535 static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
1536 {
1537 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
1538 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
1539 	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
1540 	int ret;
1541 
1542 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
1543 			     OPA_LINKDOWN_REASON_UNKNOWN);
1544 	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
1545 	return ret;
1546 }
1547 
1548 static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
1549 			    int guid_index, __be64 *guid)
1550 {
1551 	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
1552 
1553 	if (guid_index >= HFI1_GUIDS_PER_PORT)
1554 		return -EINVAL;
1555 
1556 	*guid = get_sguid(ibp, guid_index);
1557 	return 0;
1558 }
1559 
1560 /*
1561  * convert ah port,sl to sc
1562  */
1563 u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
1564 {
1565 	struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));
1566 
1567 	return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
1568 }
1569 
1570 static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1571 {
1572 	struct hfi1_ibport *ibp;
1573 	struct hfi1_pportdata *ppd;
1574 	struct hfi1_devdata *dd;
1575 	u8 sc5;
1576 
1577 	if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
1578 	    !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
1579 		return -EINVAL;
1580 
1581 	/* test the mapping for validity */
1582 	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1583 	ppd = ppd_from_ibp(ibp);
1584 	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
1585 	dd = dd_from_ppd(ppd);
1586 	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
1587 		return -EINVAL;
1588 	return 0;
1589 }
1590 
1591 static void hfi1_notify_new_ah(struct ib_device *ibdev,
1592 			       struct rdma_ah_attr *ah_attr,
1593 			       struct rvt_ah *ah)
1594 {
1595 	struct hfi1_ibport *ibp;
1596 	struct hfi1_pportdata *ppd;
1597 	struct hfi1_devdata *dd;
1598 	u8 sc5;
1599 	struct rdma_ah_attr *attr = &ah->attr;
1600 
1601 	/*
1602 	 * Do not trust reading anything from rvt_ah at this point as it is not
1603 	 * done being set up. We can, however, modify things which we need to set.
1604 	 */
1605 
1606 	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1607 	ppd = ppd_from_ibp(ibp);
1608 	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
1609 	hfi1_update_ah_attr(ibdev, attr);
1610 	hfi1_make_opa_lid(attr);
1611 	dd = dd_from_ppd(ppd);
1612 	ah->vl = sc_to_vlt(dd, sc5);
1613 	if (ah->vl < num_vls || ah->vl == 15)
1614 		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
1615 }
1616 
1617 /**
1618  * hfi1_get_npkeys - return the size of the PKEY table for context 0
1619  * @dd: the hfi1_ib device
1620  */
1621 unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
1622 {
1623 	return ARRAY_SIZE(dd->pport[0].pkeys);
1624 }
1625 
1626 static void init_ibport(struct hfi1_pportdata *ppd)
1627 {
1628 	struct hfi1_ibport *ibp = &ppd->ibport_data;
1629 	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
1630 	int i;
1631 
1632 	for (i = 0; i < sz; i++) {
1633 		ibp->sl_to_sc[i] = i;
1634 		ibp->sc_to_sl[i] = i;
1635 	}
1636 
1637 	for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
1638 		INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
1639 	setup_timer(&ibp->rvp.trap_timer, hfi1_handle_trap_timer,
1640 		    (unsigned long)ibp);
1641 
1642 	spin_lock_init(&ibp->rvp.lock);
1643 	/* Set the prefix to the default value (see ch. 4.1.1) */
1644 	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1645 	ibp->rvp.sm_lid = 0;
1646 	/*
1647 	 * Below should only set bits defined in OPA PortInfo.CapabilityMask
1648 	 * and PortInfo.CapabilityMask3
1649 	 */
1650 	ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
1651 		IB_PORT_CAP_MASK_NOTICE_SUP;
1652 	ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
1653 	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1654 	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1655 	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1656 	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1657 	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1658 
1659 	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1660 	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
1661 }
1662 
1663 static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str)
1664 {
1665 	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
1666 	struct hfi1_ibdev *dev = dev_from_rdi(rdi);
1667 	u32 ver = dd_from_dev(dev)->dc8051_ver;
1668 
1669 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver),
1670 		 dc8051_ver_min(ver), dc8051_ver_patch(ver));
1671 }
1672 
1673 static const char * const driver_cntr_names[] = {
1674 	/* must be element 0 */
1675 	"DRIVER_KernIntr",
1676 	"DRIVER_ErrorIntr",
1677 	"DRIVER_Tx_Errs",
1678 	"DRIVER_Rcv_Errs",
1679 	"DRIVER_HW_Errs",
1680 	"DRIVER_NoPIOBufs",
1681 	"DRIVER_CtxtsOpen",
1682 	"DRIVER_RcvLen_Errs",
1683 	"DRIVER_EgrBufFull",
1684 	"DRIVER_EgrHdrFull"
1685 };
1686 
1687 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
1688 static const char **dev_cntr_names;
1689 static const char **port_cntr_names;
1690 static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
1691 static int num_dev_cntrs;
1692 static int num_port_cntrs;
1693 static int cntr_names_initialized;
1694 
1695 /*
1696  * Convert a list of names separated by '\n' into an array of NUL-terminated
1697  * strings. Optionally, extra entries can be reserved at the end of the
1698  * pointer array to hold externally supplied strings.
1699  */
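/*
 * A minimal sketch of the resulting layout, assuming the hypothetical input
 * names_in = "TxPkts\nRxPkts\n" with num_extra_names = 1:
 *
 *   names_out: | q[0] | q[1] | q[2] (reserved) | "TxPkts\0RxPkts\0" |
 *
 * q[0] and q[1] point into the copied text, with each '\n' overwritten by
 * '\0'; the reserved slot is filled in later by the caller (see
 * alloc_hw_stats()).
 */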
1700 static int init_cntr_names(const char *names_in,
1701 			   const size_t names_len,
1702 			   int num_extra_names,
1703 			   int *num_cntrs,
1704 			   const char ***cntr_names)
1705 {
1706 	char *names_out, *p, **q;
1707 	int i, n;
1708 
1709 	n = 0;
1710 	for (i = 0; i < names_len; i++)
1711 		if (names_in[i] == '\n')
1712 			n++;
1713 
1714 	names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
1715 			    GFP_KERNEL);
1716 	if (!names_out) {
1717 		*num_cntrs = 0;
1718 		*cntr_names = NULL;
1719 		return -ENOMEM;
1720 	}
1721 
1722 	p = names_out + (n + num_extra_names) * sizeof(char *);
1723 	memcpy(p, names_in, names_len);
1724 
1725 	q = (char **)names_out;
1726 	for (i = 0; i < n; i++) {
1727 		q[i] = p;
1728 		p = strchr(p, '\n');
1729 		*p++ = '\0';
1730 	}
1731 
1732 	*num_cntrs = n;
1733 	*cntr_names = (const char **)names_out;
1734 	return 0;
1735 }
1736 
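/*
 * alloc_hw_stats - allocate the rdma_hw_stats structure for the ib core.
 * On first use, lazily build the device and per-port counter name arrays
 * from the name strings held in the devdata (plus driver_cntr_names above),
 * serialized by cntr_names_lock.  A port_num of 0 selects the device-wide
 * counter set; any other port number selects the per-port set.
 */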
1737 static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
1738 					    u8 port_num)
1739 {
1740 	int i, err;
1741 
1742 	mutex_lock(&cntr_names_lock);
1743 	if (!cntr_names_initialized) {
1744 		struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1745 
1746 		err = init_cntr_names(dd->cntrnames,
1747 				      dd->cntrnameslen,
1748 				      num_driver_cntrs,
1749 				      &num_dev_cntrs,
1750 				      &dev_cntr_names);
1751 		if (err) {
1752 			mutex_unlock(&cntr_names_lock);
1753 			return NULL;
1754 		}
1755 
1756 		for (i = 0; i < num_driver_cntrs; i++)
1757 			dev_cntr_names[num_dev_cntrs + i] =
1758 				driver_cntr_names[i];
1759 
1760 		err = init_cntr_names(dd->portcntrnames,
1761 				      dd->portcntrnameslen,
1762 				      0,
1763 				      &num_port_cntrs,
1764 				      &port_cntr_names);
1765 		if (err) {
1766 			kfree(dev_cntr_names);
1767 			dev_cntr_names = NULL;
1768 			mutex_unlock(&cntr_names_lock);
1769 			return NULL;
1770 		}
1771 		cntr_names_initialized = 1;
1772 	}
1773 	mutex_unlock(&cntr_names_lock);
1774 
1775 	if (!port_num)
1776 		return rdma_alloc_hw_stats_struct(
1777 				dev_cntr_names,
1778 				num_dev_cntrs + num_driver_cntrs,
1779 				RDMA_HW_STATS_DEFAULT_LIFESPAN);
1780 	else
1781 		return rdma_alloc_hw_stats_struct(
1782 				port_cntr_names,
1783 				num_port_cntrs,
1784 				RDMA_HW_STATS_DEFAULT_LIFESPAN);
1785 }
1786 
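/* Sum the interrupt counters across all hfi1 devices in the system. */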
1787 static u64 hfi1_sps_ints(void)
1788 {
1789 	unsigned long flags;
1790 	struct hfi1_devdata *dd;
1791 	u64 sps_ints = 0;
1792 
1793 	spin_lock_irqsave(&hfi1_devs_lock, flags);
1794 	list_for_each_entry(dd, &hfi1_dev_list, list) {
1795 		sps_ints += get_all_cpu_total(dd->int_counter);
1796 	}
1797 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
1798 	return sps_ints;
1799 }
1800 
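/*
 * get_hw_stats - fill in the values for the counter group allocated by
 * alloc_hw_stats().  A port of 0 reports the device counters followed by
 * the driver counters (element 0 being the interrupt total from
 * hfi1_sps_ints(), the rest taken from hfi1_stats); a non-zero port
 * reports that port's counters.  Returns the number of values written.
 */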
1801 static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1802 			u8 port, int index)
1803 {
1804 	u64 *values;
1805 	int count;
1806 
1807 	if (!port) {
1808 		u64 *driver_stats = (u64 *)&hfi1_stats; /* avoid shadowing @stats */
1809 		int i;
1810 
1811 		hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
1812 		values[num_dev_cntrs] = hfi1_sps_ints();
1813 		for (i = 1; i < num_driver_cntrs; i++)
1814 			values[num_dev_cntrs + i] = driver_stats[i];
1815 		count = num_dev_cntrs + num_driver_cntrs;
1816 	} else {
1817 		struct hfi1_ibport *ibp = to_iport(ibdev, port);
1818 
1819 		hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
1820 		count = num_port_cntrs;
1821 	}
1822 
1823 	memcpy(stats->value, values, count * sizeof(u64));
1824 	return count;
1825 }
1826 
1827 /**
1828  * hfi1_register_ib_device - register our device with the infiniband core
1829  * @dd: the device data structure
1830  * Return 0 if successful, errno if unsuccessful.
1831  */
1832 int hfi1_register_ib_device(struct hfi1_devdata *dd)
1833 {
1834 	struct hfi1_ibdev *dev = &dd->verbs_dev;
1835 	struct ib_device *ibdev = &dev->rdi.ibdev;
1836 	struct hfi1_pportdata *ppd = dd->pport;
1837 	struct hfi1_ibport *ibp = &ppd->ibport_data;
1838 	unsigned i;
1839 	int ret;
1840 	size_t lcpysz = IB_DEVICE_NAME_MAX;
1841 
1842 	for (i = 0; i < dd->num_pports; i++)
1843 		init_ibport(ppd + i);
1844 
1845 	/* Only need to initialize non-zero fields. */
1846 
1847 	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
1848 
1849 	seqlock_init(&dev->iowait_lock);
1850 	seqlock_init(&dev->txwait_lock);
1851 	INIT_LIST_HEAD(&dev->txwait);
1852 	INIT_LIST_HEAD(&dev->memwait);
1853 
1854 	ret = verbs_txreq_init(dev);
1855 	if (ret)
1856 		goto err_verbs_txreq;
1857 
1858 	/* Use first-port GUID as node guid */
1859 	ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
1860 
1861 	/*
1862 	 * The system image GUID is supposed to be the same for all
1863 	 * HFIs in a single system but since there can be other
1864 	 * device types in the system, we can't be sure this is unique.
1865 	 */
1866 	if (!ib_hfi1_sys_image_guid)
1867 		ib_hfi1_sys_image_guid = ibdev->node_guid;
1868 	lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
1869 	strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
1870 	ibdev->owner = THIS_MODULE;
1871 	ibdev->phys_port_cnt = dd->num_pports;
1872 	ibdev->dev.parent = &dd->pcidev->dev;
1873 	ibdev->modify_device = modify_device;
1874 	ibdev->alloc_hw_stats = alloc_hw_stats;
1875 	ibdev->get_hw_stats = get_hw_stats;
1876 	ibdev->alloc_rdma_netdev = hfi1_vnic_alloc_rn;
1877 
1878 	/* keep process mad in the driver */
1879 	ibdev->process_mad = hfi1_process_mad;
1880 	ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
1881 
1882 	strncpy(ibdev->node_desc, init_utsname()->nodename,
1883 		sizeof(ibdev->node_desc));
1884 
1885 	/*
1886 	 * Fill in rvt info object.
1887 	 */
1888 	dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
1889 	dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
1890 	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
1891 	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
1892 	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
1893 	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
1894 	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
1895 	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
1896 	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
1897 	/*
1898 	 * Fill in rvt info device attributes.
1899 	 */
1900 	hfi1_fill_device_attr(dd);
1901 
1902 	/* queue pair */
1903 	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
1904 	dd->verbs_dev.rdi.dparms.qpn_start = 0;
1905 	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
1906 	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
1907 	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
1908 	dd->verbs_dev.rdi.dparms.qpn_res_end =
1909 		dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
1910 	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
1911 	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
1912 	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
1913 	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
1914 	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
1915 						RDMA_CORE_CAP_OPA_AH;
1916 	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
1917 
1918 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
1919 	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
1920 	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
1921 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
1922 	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
1923 	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
1924 	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
1925 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
1926 	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
1927 	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
1928 	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
1929 	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
1931 	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
1932 	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
1933 	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
1934 	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
1935 	dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
1936 	dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
1937 
1938 	/* completion queue */
1939 	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
1940 		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
1941 		 "hfi1_cq%d", dd->unit);
1942 	dd->verbs_dev.rdi.dparms.node = dd->node;
1943 
1944 	/* misc settings */
1945 	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
1946 	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
1947 	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
1948 	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
1949 
1950 	/* post send table */
1951 	dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
1952 
1953 	ppd = dd->pport;
1954 	for (i = 0; i < dd->num_pports; i++, ppd++)
1955 		rvt_init_port(&dd->verbs_dev.rdi,
1956 			      &ppd->ibport_data.rvp,
1957 			      i,
1958 			      ppd->pkeys);
1959 
1960 	ret = rvt_register_device(&dd->verbs_dev.rdi);
1961 	if (ret)
1962 		goto err_verbs_txreq;
1963 
1964 	ret = hfi1_verbs_register_sysfs(dd);
1965 	if (ret)
1966 		goto err_class;
1967 
1968 	return ret;
1969 
1970 err_class:
1971 	rvt_unregister_device(&dd->verbs_dev.rdi);
1972 err_verbs_txreq:
1973 	verbs_txreq_exit(dev);
1974 	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1975 	return ret;
1976 }
1977 
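/**
 * hfi1_unregister_ib_device - undo hfi1_register_ib_device()
 * @dd: the device data structure
 *
 * Remove sysfs files, unregister from rdmavt, tear down the txreq cache
 * and mem_timer, and free the cached counter name arrays.
 */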
1978 void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
1979 {
1980 	struct hfi1_ibdev *dev = &dd->verbs_dev;
1981 
1982 	hfi1_verbs_unregister_sysfs(dd);
1983 
1984 	rvt_unregister_device(&dd->verbs_dev.rdi);
1985 
1986 	if (!list_empty(&dev->txwait))
1987 		dd_dev_err(dd, "txwait list not empty!\n");
1988 	if (!list_empty(&dev->memwait))
1989 		dd_dev_err(dd, "memwait list not empty!\n");
1990 
1991 	del_timer_sync(&dev->mem_timer);
1992 	verbs_txreq_exit(dev);
1993 
1994 	mutex_lock(&cntr_names_lock);
1995 	kfree(dev_cntr_names);
1996 	kfree(port_cntr_names);
1997 	dev_cntr_names = NULL;
1998 	port_cntr_names = NULL;
1999 	cntr_names_initialized = 0;
2000 	mutex_unlock(&cntr_names_lock);
2001 }
2002 
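/**
 * hfi1_cnp_rcv - process a received CNP (congestion notification packet)
 * @packet: the receive packet context
 *
 * Derive the remote LID/QPN and service type from the QP type, map the
 * packet's SC to an SL, and pass the event to process_becn(). QP types
 * with no congestion-control handling are counted as packet drops.
 */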
2003 void hfi1_cnp_rcv(struct hfi1_packet *packet)
2004 {
2005 	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
2006 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2007 	struct ib_header *hdr = packet->hdr;
2008 	struct rvt_qp *qp = packet->qp;
2009 	u32 lqpn, rqpn = 0;
2010 	u16 rlid = 0;
2011 	u8 sl, sc5, svc_type;
2012 
2013 	switch (packet->qp->ibqp.qp_type) {
2014 	case IB_QPT_UC:
2015 		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
2016 		rqpn = qp->remote_qpn;
2017 		svc_type = IB_CC_SVCTYPE_UC;
2018 		break;
2019 	case IB_QPT_RC:
2020 		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
2021 		rqpn = qp->remote_qpn;
2022 		svc_type = IB_CC_SVCTYPE_RC;
2023 		break;
2024 	case IB_QPT_SMI:
2025 	case IB_QPT_GSI:
2026 	case IB_QPT_UD:
2027 		svc_type = IB_CC_SVCTYPE_UD;
2028 		break;
2029 	default:
2030 		ibp->rvp.n_pkt_drops++;
2031 		return;
2032 	}
2033 
2034 	sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
2035 	sl = ibp->sc_to_sl[sc5];
2036 	lqpn = qp->ibqp.qp_num;
2037 
2038 	process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
2039 }
2040