xref: /openbmc/linux/drivers/infiniband/sw/rdmavt/qp.c (revision 5190f052)
1 /*
2  * Copyright(c) 2016 - 2018 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include <rdma/ib_hdrs.h>
55 #include <rdma/opa_addr.h>
56 #include "qp.h"
57 #include "vt.h"
58 #include "trace.h"
59 
60 static void rvt_rc_timeout(struct timer_list *t);
61 
62 /*
63  * Convert the AETH RNR timeout code into the number of microseconds.
64  */
65 static const u32 ib_rvt_rnr_table[32] = {
66 	655360, /* 00: 655.36 */
67 	10,     /* 01:    .01 */
68 	20,     /* 02:    .02 */
69 	30,     /* 03:    .03 */
70 	40,     /* 04:    .04 */
71 	60,     /* 05:    .06 */
72 	80,     /* 06:    .08 */
73 	120,    /* 07:    .12 */
74 	160,    /* 08:    .16 */
75 	240,    /* 09:    .24 */
76 	320,    /* 0A:    .32 */
77 	480,    /* 0B:    .48 */
78 	640,    /* 0C:    .64 */
79 	960,    /* 0D:    .96 */
80 	1280,   /* 0E:   1.28 */
81 	1920,   /* 0F:   1.92 */
82 	2560,   /* 10:   2.56 */
83 	3840,   /* 11:   3.84 */
84 	5120,   /* 12:   5.12 */
85 	7680,   /* 13:   7.68 */
86 	10240,  /* 14:  10.24 */
87 	15360,  /* 15:  15.36 */
88 	20480,  /* 16:  20.48 */
89 	30720,  /* 17:  30.72 */
90 	40960,  /* 18:  40.96 */
91 	61440,  /* 19:  61.44 */
92 	81920,  /* 1A:  81.92 */
93 	122880, /* 1B: 122.88 */
94 	163840, /* 1C: 163.84 */
95 	245760, /* 1D: 245.76 */
96 	327680, /* 1E: 327.68 */
97 	491520  /* 1F: 491.52 */
98 };
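
/*
 * Example (illustrative): an AETH carrying RNR timer code 0x0D maps to
 * ib_rvt_rnr_table[0x0D] == 960, i.e. a 0.96 ms RNR delay before the
 * requester may retry.  The table is indexed by the 5-bit timer code.
 */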
99 
100 /*
101  * Note that it is OK to post send work requests in the SQE and ERR
102  * states; rvt_do_send() will process them and generate error
103  * completions as per IB 1.2 C10-96.
104  */
105 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
106 	[IB_QPS_RESET] = 0,
107 	[IB_QPS_INIT] = RVT_POST_RECV_OK,
108 	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
109 	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
110 	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
111 	    RVT_PROCESS_NEXT_SEND_OK,
112 	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
113 	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
114 	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
115 	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
116 	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
117 	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
118 };
119 EXPORT_SYMBOL(ib_rvt_state_ops);
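
/*
 * Usage sketch (illustrative): callers gate posting/processing on the
 * current state, e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 *
 * so a post_send against a QP still in RESET or INIT is rejected, while
 * posts in SQE/ERR are accepted and later flushed per IB 1.2 C10-96.
 */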
120 
121 /* platform specific: return the last level cache (llc) size, in KiB */
122 static int rvt_wss_llc_size(void)
123 {
124 	/* assume that the boot CPU value is universal for all CPUs */
125 	return boot_cpu_data.x86_cache_size;
126 }
127 
128 /* platform specific: cacheless copy */
129 static void cacheless_memcpy(void *dst, void *src, size_t n)
130 {
131 	/*
132 	 * Use the only available X64 cacheless copy.  Add a __user cast
133 	 * to quiet sparse.  The src argument is already in the kernel so
134 	 * there are no security issues.  The extra fault recovery machinery
135 	 * is not invoked.
136 	 */
137 	__copy_user_nocache(dst, (void __user *)src, n, 0);
138 }
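
/*
 * Rationale (sketch): when sge_copy_mode is RVT_SGE_COPY_ADAPTIVE, the
 * working-set-size (wss) tracking below is used to decide when payload
 * copies should bypass the CPU cache; once the tracked set exceeds the
 * LLC-based threshold, copies switch to cacheless_memcpy() to avoid
 * thrashing the cache.
 */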
139 
140 void rvt_wss_exit(struct rvt_dev_info *rdi)
141 {
142 	struct rvt_wss *wss = rdi->wss;
143 
144 	if (!wss)
145 		return;
146 
147 	/* coded to handle partially initialized and repeat callers */
148 	kfree(wss->entries);
149 	wss->entries = NULL;
150 	kfree(rdi->wss);
151 	rdi->wss = NULL;
152 }
153 
154 /**
155  * rvt_wss_init - Init wss data structures
156  *
157  * Return: 0 on success
158  */
159 int rvt_wss_init(struct rvt_dev_info *rdi)
160 {
161 	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
162 	unsigned int wss_threshold = rdi->dparms.wss_threshold;
163 	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
164 	long llc_size;
165 	long llc_bits;
166 	long table_size;
167 	long table_bits;
168 	struct rvt_wss *wss;
169 	int node = rdi->dparms.node;
170 
171 	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
172 		rdi->wss = NULL;
173 		return 0;
174 	}
175 
176 	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
177 	if (!rdi->wss)
178 		return -ENOMEM;
179 	wss = rdi->wss;
180 
181 	/* check for a valid percent range - default to 80 if none or invalid */
182 	if (wss_threshold < 1 || wss_threshold > 100)
183 		wss_threshold = 80;
184 
185 	/* reject a wildly large period */
186 	if (wss_clean_period > 1000000)
187 		wss_clean_period = 256;
188 
189 	/* reject a zero period */
190 	if (wss_clean_period == 0)
191 		wss_clean_period = 1;
192 
193 	/*
194 	 * Calculate the table size - the next power of 2 larger than the
195 	 * LLC size.  LLC size is in KiB.
196 	 */
197 	llc_size = rvt_wss_llc_size() * 1024;
198 	table_size = roundup_pow_of_two(llc_size);
199 
200 	/* one bit per page in rounded up table */
201 	llc_bits = llc_size / PAGE_SIZE;
202 	table_bits = table_size / PAGE_SIZE;
203 	wss->pages_mask = table_bits - 1;
204 	wss->num_entries = table_bits / BITS_PER_LONG;
205 
206 	wss->threshold = (llc_bits * wss_threshold) / 100;
207 	if (wss->threshold == 0)
208 		wss->threshold = 1;
209 
210 	wss->clean_period = wss_clean_period;
211 	atomic_set(&wss->clean_counter, wss_clean_period);
212 
213 	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
214 				    GFP_KERNEL, node);
215 	if (!wss->entries) {
216 		rvt_wss_exit(rdi);
217 		return -ENOMEM;
218 	}
219 
220 	return 0;
221 }
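
/*
 * Sizing example for rvt_wss_init() (illustrative): with a 32768 KiB
 * LLC and 4 KiB pages, llc_size = 32 MiB, table_size = 32 MiB (already
 * a power of two), table_bits = 8192 one-bit page slots, num_entries =
 * 8192 / 64 = 128 longs, and the default 80% threshold works out to
 * 6553 pages.
 */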
222 
223 /*
224  * Advance the clean counter.  When the clean period has expired,
225  * clean an entry.
226  *
227  * This is implemented in atomics to avoid locking.  Because multiple
228  * variables are involved, it can be racy which can lead to slightly
229  * inaccurate information.  Since this is only a heuristic, this is
230  * OK.  Any inaccuracies will clean themselves out as the counter
231  * advances.  That said, it is unlikely the entry clean operation will
232  * race - the next possible racer will not start until the next clean
233  * period.
234  *
235  * The clean counter is implemented as a decrement to zero.  When zero
236  * is reached an entry is cleaned.
237  */
238 static void wss_advance_clean_counter(struct rvt_wss *wss)
239 {
240 	int entry;
241 	int weight;
242 	unsigned long bits;
243 
244 	/* become the cleaner if we decrement the counter to zero */
245 	if (atomic_dec_and_test(&wss->clean_counter)) {
246 		/*
247 		 * Set, not add, the clean period.  This avoids an issue
248 		 * where the counter could decrement below the clean period.
249 		 * Doing a set can result in lost decrements, slowing the
250 		 * clean advance.  Since this is a heuristic, this possible
251 		 * slowdown is OK.
252 		 *
253 		 * An alternative is to loop, advancing the counter by a
254 		 * clean period until the result is > 0. However, this could
255 		 * lead to several threads keeping another in the clean loop.
256 		 * This could be mitigated by limiting the number of times
257 		 * we stay in the loop.
258 		 */
259 		atomic_set(&wss->clean_counter, wss->clean_period);
260 
261 		/*
262 		 * Uniquely grab the entry to clean and move to next.
263 		 * The current entry is always the lower bits of
264 		 * wss.clean_entry.  The table size, wss.num_entries,
265 		 * is always a power-of-2.
266 		 */
267 		entry = (atomic_inc_return(&wss->clean_entry) - 1)
268 			& (wss->num_entries - 1);
269 
270 		/* clear the entry and count the bits */
271 		bits = xchg(&wss->entries[entry], 0);
272 		weight = hweight64((u64)bits);
273 		/* only adjust the contended total count if needed */
274 		if (weight)
275 			atomic_sub(weight, &wss->total_count);
276 	}
277 }
278 
279 /*
280  * Insert the given address into the working set array.
281  */
282 static void wss_insert(struct rvt_wss *wss, void *address)
283 {
284 	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
285 	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
286 	u32 nr = page & (BITS_PER_LONG - 1);
287 
288 	if (!test_and_set_bit(nr, &wss->entries[entry]))
289 		atomic_inc(&wss->total_count);
290 
291 	wss_advance_clean_counter(wss);
292 }
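
/*
 * Mapping example for wss_insert() (illustrative): with 4 KiB pages,
 * 64-bit longs and pages_mask == 0x1fff, address 0x12345678 lies on
 * page 0x12345; masking gives page index 0x345 (837), i.e. bit
 * 837 % 64 == 5 of entries[837 / 64] == entries[13].
 */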
293 
294 /*
295  * Is the working set larger than the threshold?
296  */
297 static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
298 {
299 	return atomic_read(&wss->total_count) >= wss->threshold;
300 }
301 
302 static void get_map_page(struct rvt_qpn_table *qpt,
303 			 struct rvt_qpn_map *map)
304 {
305 	unsigned long page = get_zeroed_page(GFP_KERNEL);
306 
307 	/*
308 	 * Free the page if someone raced with us installing it.
309 	 */
310 
311 	spin_lock(&qpt->lock);
312 	if (map->page)
313 		free_page(page);
314 	else
315 		map->page = (void *)page;
316 	spin_unlock(&qpt->lock);
317 }
318 
319 /**
320  * init_qpn_table - initialize the QP number table for a device
321  * @qpt: the QPN table
322  */
323 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
324 {
325 	u32 offset, i;
326 	struct rvt_qpn_map *map;
327 	int ret = 0;
328 
329 	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
330 		return -EINVAL;
331 
332 	spin_lock_init(&qpt->lock);
333 
334 	qpt->last = rdi->dparms.qpn_start;
335 	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
336 
337 	/*
338 	 * Drivers may want some QPs beyond what we need for verbs; let them use
339 	 * our qpn table. No need for two. Let's go ahead and mark the bitmaps
340 	 * for those. The reserved range must be *after* the range which verbs
341 	 * will pick from.
342 	 */
343 
344 	/* Figure out number of bit maps needed before reserved range */
345 	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
346 
347 	/* This should always be zero */
348 	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
349 
350 	/* Starting with the first reserved bit map */
351 	map = &qpt->map[qpt->nmaps];
352 
353 	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
354 		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
355 	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
356 		if (!map->page) {
357 			get_map_page(qpt, map);
358 			if (!map->page) {
359 				ret = -ENOMEM;
360 				break;
361 			}
362 		}
363 		set_bit(offset, map->page);
364 		offset++;
365 		if (offset == RVT_BITS_PER_PAGE) {
366 			/* next page */
367 			qpt->nmaps++;
368 			map++;
369 			offset = 0;
370 		}
371 	}
372 	return ret;
373 }
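
/*
 * Example (illustrative): with qpn_res_start = 0x10000 and qpn_res_end
 * = 0x1ffff, init_qpn_table() pre-sets those 65536 bits so alloc_qpn()
 * below never hands a reserved QPN to a verbs consumer.
 */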
374 
375 /**
376  * free_qpn_table - free the QP number table for a device
377  * @qpt: the QPN table
378  */
379 static void free_qpn_table(struct rvt_qpn_table *qpt)
380 {
381 	int i;
382 
383 	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
384 		free_page((unsigned long)qpt->map[i].page);
385 }
386 
387 /**
388  * rvt_driver_qp_init - Init driver qp resources
389  * @rdi: rvt dev structure
390  *
391  * Return: 0 on success
392  */
393 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
394 {
395 	int i;
396 	int ret = -ENOMEM;
397 
398 	if (!rdi->dparms.qp_table_size)
399 		return -EINVAL;
400 
401 	/*
402 	 * If driver is not doing any QP allocation then make sure it is
403 	 * providing the necessary QP functions.
404 	 */
405 	if (!rdi->driver_f.free_all_qps ||
406 	    !rdi->driver_f.qp_priv_alloc ||
407 	    !rdi->driver_f.qp_priv_free ||
408 	    !rdi->driver_f.notify_qp_reset ||
409 	    !rdi->driver_f.notify_restart_rc)
410 		return -EINVAL;
411 
412 	/* allocate parent object */
413 	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
414 				   rdi->dparms.node);
415 	if (!rdi->qp_dev)
416 		return -ENOMEM;
417 
418 	/* allocate hash table */
419 	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
420 	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
421 	rdi->qp_dev->qp_table =
422 		kmalloc_array_node(rdi->qp_dev->qp_table_size,
423 			     sizeof(*rdi->qp_dev->qp_table),
424 			     GFP_KERNEL, rdi->dparms.node);
425 	if (!rdi->qp_dev->qp_table)
426 		goto no_qp_table;
427 
428 	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
429 		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
430 
431 	spin_lock_init(&rdi->qp_dev->qpt_lock);
432 
433 	/* initialize qpn map */
434 	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
435 		goto fail_table;
436 
437 	spin_lock_init(&rdi->n_qps_lock);
438 
439 	return 0;
440 
441 fail_table:
442 	kfree(rdi->qp_dev->qp_table);
443 	free_qpn_table(&rdi->qp_dev->qpn_table);
444 
445 no_qp_table:
446 	kfree(rdi->qp_dev);
447 
448 	return ret;
449 }
450 
451 /**
452  * rvt_free_all_qps - check for QPs still in use
453  * @rdi: rvt device info structure
454  *
455  * There should not be any QPs still in use.
456  * Free memory for table.
457  */
458 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
459 {
460 	unsigned long flags;
461 	struct rvt_qp *qp;
462 	unsigned n, qp_inuse = 0;
463 	spinlock_t *ql; /* work around too long line below */
464 
465 	if (rdi->driver_f.free_all_qps)
466 		qp_inuse = rdi->driver_f.free_all_qps(rdi);
467 
468 	qp_inuse += rvt_mcast_tree_empty(rdi);
469 
470 	if (!rdi->qp_dev)
471 		return qp_inuse;
472 
473 	ql = &rdi->qp_dev->qpt_lock;
474 	spin_lock_irqsave(ql, flags);
475 	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
476 		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
477 					       lockdep_is_held(ql));
478 		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
479 
480 		for (; qp; qp = rcu_dereference_protected(qp->next,
481 							  lockdep_is_held(ql)))
482 			qp_inuse++;
483 	}
484 	spin_unlock_irqrestore(ql, flags);
485 	synchronize_rcu();
486 	return qp_inuse;
487 }
488 
489 /**
490  * rvt_qp_exit - clean up qps on device exit
491  * @rdi: rvt dev structure
492  *
493  * Check for qp leaks and free resources.
494  */
495 void rvt_qp_exit(struct rvt_dev_info *rdi)
496 {
497 	u32 qps_inuse = rvt_free_all_qps(rdi);
498 
499 	if (qps_inuse)
500 		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
501 			   qps_inuse);
502 	if (!rdi->qp_dev)
503 		return;
504 
505 	kfree(rdi->qp_dev->qp_table);
506 	free_qpn_table(&rdi->qp_dev->qpn_table);
507 	kfree(rdi->qp_dev);
508 }
509 
510 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
511 			      struct rvt_qpn_map *map, unsigned off)
512 {
513 	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
514 }
515 
516 /**
517  * alloc_qpn - Allocate the next available qpn or zero/one for QP type
518  *	       IB_QPT_SMI/IB_QPT_GSI
519  * @rdi: rvt device info structure
520  * @qpt: queue pair number table pointer
 * @type: the QP type
521  * @port_num: IB port number, 1 based, comes from core
522  *
523  * Return: The queue pair number
524  */
525 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
526 		     enum ib_qp_type type, u8 port_num)
527 {
528 	u32 i, offset, max_scan, qpn;
529 	struct rvt_qpn_map *map;
530 	u32 ret;
531 
532 	if (rdi->driver_f.alloc_qpn)
533 		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
534 
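	/*
	 * QP0/QP1 bookkeeping (illustrative): each port owns two bits in
	 * qpt->flags, bit 0 of the pair for the SMI (QP0) and bit 1 for
	 * the GSI (QP1).  E.g. a GSI request on port 2 computes
	 * n = 1 << (1 + 2 * (2 - 1)) = 8.
	 */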
535 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
536 		unsigned n;
537 
538 		ret = type == IB_QPT_GSI;
539 		n = 1 << (ret + 2 * (port_num - 1));
540 		spin_lock(&qpt->lock);
541 		if (qpt->flags & n)
542 			ret = -EINVAL;
543 		else
544 			qpt->flags |= n;
545 		spin_unlock(&qpt->lock);
546 		goto bail;
547 	}
548 
549 	qpn = qpt->last + qpt->incr;
550 	if (qpn >= RVT_QPN_MAX)
551 		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
552 	/* offset carries bit 0 */
553 	offset = qpn & RVT_BITS_PER_PAGE_MASK;
554 	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
555 	max_scan = qpt->nmaps - !offset;
556 	for (i = 0;;) {
557 		if (unlikely(!map->page)) {
558 			get_map_page(qpt, map);
559 			if (unlikely(!map->page))
560 				break;
561 		}
562 		do {
563 			if (!test_and_set_bit(offset, map->page)) {
564 				qpt->last = qpn;
565 				ret = qpn;
566 				goto bail;
567 			}
568 			offset += qpt->incr;
569 			/*
570 			 * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
571 			 * That is OK.   It gets re-assigned below.
572 			 */
573 			qpn = mk_qpn(qpt, map, offset);
574 		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
575 		/*
576 		 * In order to keep the number of pages allocated to a
577 		 * minimum, we scan all the existing pages before increasing
578 		 * the size of the bitmap table.
579 		 */
580 		if (++i > max_scan) {
581 			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
582 				break;
583 			map = &qpt->map[qpt->nmaps++];
584 			/* start at incr with current bit 0 */
585 			offset = qpt->incr | (offset & 1);
586 		} else if (map < &qpt->map[qpt->nmaps]) {
587 			++map;
588 			/* start at incr with current bit 0 */
589 			offset = qpt->incr | (offset & 1);
590 		} else {
591 			map = &qpt->map[0];
592 			/* wrap to first map page, invert bit 0 */
593 			offset = qpt->incr | ((offset & 1) ^ 1);
594 		}
595 		/* there can be no set bits in low-order QoS bits */
596 		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
597 		qpn = mk_qpn(qpt, map, offset);
598 	}
599 
600 	ret = -ENOMEM;
601 
602 bail:
603 	return ret;
604 }
605 
606 /**
607  * rvt_clear_mr_refs - Drop held mr refs
608  * @qp: rvt qp data structure
609  * @clr_sends: Whether to clear the send side or not
610  */
611 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
612 {
613 	unsigned n;
614 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
615 
616 	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
617 		rvt_put_ss(&qp->s_rdma_read_sge);
618 
619 	rvt_put_ss(&qp->r_sge);
620 
621 	if (clr_sends) {
622 		while (qp->s_last != qp->s_head) {
623 			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
624 
625 			rvt_put_swqe(wqe);
626 
627 			if (qp->ibqp.qp_type == IB_QPT_UD ||
628 			    qp->ibqp.qp_type == IB_QPT_SMI ||
629 			    qp->ibqp.qp_type == IB_QPT_GSI)
630 				atomic_dec(&ibah_to_rvtah(
631 						wqe->ud_wr.ah)->refcount);
632 			if (++qp->s_last >= qp->s_size)
633 				qp->s_last = 0;
634 			smp_wmb(); /* see qp_set_savail */
635 		}
636 		if (qp->s_rdma_mr) {
637 			rvt_put_mr(qp->s_rdma_mr);
638 			qp->s_rdma_mr = NULL;
639 		}
640 	}
641 
642 	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
643 		struct rvt_ack_entry *e = &qp->s_ack_queue[n];
644 
645 		if (e->rdma_sge.mr) {
646 			rvt_put_mr(e->rdma_sge.mr);
647 			e->rdma_sge.mr = NULL;
648 		}
649 	}
650 }
651 
652 /**
653  * rvt_swqe_has_lkey - return true if lkey is used by swqe
654  * @wqe - the send wqe
655  * @lkey - the lkey
656  *
657  * Test the swqe for using lkey
658  */
659 static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
660 {
661 	int i;
662 
663 	for (i = 0; i < wqe->wr.num_sge; i++) {
664 		struct rvt_sge *sge = &wqe->sg_list[i];
665 
666 		if (rvt_mr_has_lkey(sge->mr, lkey))
667 			return true;
668 	}
669 	return false;
670 }
671 
672 /**
673  * rvt_qp_sends_has_lkey - return true if qp sends use lkey
674  * @qp - the rvt_qp
675  * @lkey - the lkey
676  */
677 static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
678 {
679 	u32 s_last = qp->s_last;
680 
681 	while (s_last != qp->s_head) {
682 		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
683 
684 		if (rvt_swqe_has_lkey(wqe, lkey))
685 			return true;
686 
687 		if (++s_last >= qp->s_size)
688 			s_last = 0;
689 	}
690 	if (qp->s_rdma_mr)
691 		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
692 			return true;
693 	return false;
694 }
695 
696 /**
697  * rvt_qp_acks_has_lkey - return true if acks have lkey
698  * @qp - the qp
699  * @lkey - the lkey
700  */
701 static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
702 {
703 	int i;
704 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
705 
706 	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
707 		struct rvt_ack_entry *e = &qp->s_ack_queue[i];
708 
709 		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
710 			return true;
711 	}
712 	return false;
713 }
714 
715 /*
716  * rvt_qp_mr_clean - clean up remote ops for lkey
717  * @qp - the qp
718  * @lkey - the lkey that is being de-registered
719  *
720  * This routine checks if the lkey is being used by
721  * the qp.
722  *
723  * If so, the qp is put into an error state to eliminate
724  * any references from the qp.
725  */
726 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
727 {
728 	bool lastwqe = false;
729 
730 	if (qp->ibqp.qp_type == IB_QPT_SMI ||
731 	    qp->ibqp.qp_type == IB_QPT_GSI)
732 		/* avoid special QPs */
733 		return;
734 	spin_lock_irq(&qp->r_lock);
735 	spin_lock(&qp->s_hlock);
736 	spin_lock(&qp->s_lock);
737 
738 	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
739 		goto check_lwqe;
740 
741 	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
742 	    rvt_qp_sends_has_lkey(qp, lkey) ||
743 	    rvt_qp_acks_has_lkey(qp, lkey))
744 		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
745 check_lwqe:
746 	spin_unlock(&qp->s_lock);
747 	spin_unlock(&qp->s_hlock);
748 	spin_unlock_irq(&qp->r_lock);
749 	if (lastwqe) {
750 		struct ib_event ev;
751 
752 		ev.device = qp->ibqp.device;
753 		ev.element.qp = &qp->ibqp;
754 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
755 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
756 	}
757 }
758 
759 /**
760  * rvt_remove_qp - remove qp from table
761  * @rdi: rvt dev struct
762  * @qp: qp to remove
763  *
764  * Remove the QP from the table so it can't be found asynchronously by
765  * the receive routine.
766  */
767 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
768 {
769 	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
770 	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
771 	unsigned long flags;
772 	int removed = 1;
773 
774 	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
775 
776 	if (rcu_dereference_protected(rvp->qp[0],
777 			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
778 		RCU_INIT_POINTER(rvp->qp[0], NULL);
779 	} else if (rcu_dereference_protected(rvp->qp[1],
780 			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
781 		RCU_INIT_POINTER(rvp->qp[1], NULL);
782 	} else {
783 		struct rvt_qp *q;
784 		struct rvt_qp __rcu **qpp;
785 
786 		removed = 0;
787 		qpp = &rdi->qp_dev->qp_table[n];
788 		for (; (q = rcu_dereference_protected(*qpp,
789 			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
790 			qpp = &q->next) {
791 			if (q == qp) {
792 				RCU_INIT_POINTER(*qpp,
793 				     rcu_dereference_protected(qp->next,
794 				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
795 				removed = 1;
796 				trace_rvt_qpremove(qp, n);
797 				break;
798 			}
799 		}
800 	}
801 
802 	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
803 	if (removed) {
804 		synchronize_rcu();
805 		rvt_put_qp(qp);
806 	}
807 }
808 
809 /**
810  * rvt_init_qp - initialize the QP state to the reset state
811  * @qp: the QP to init or reinit
812  * @type: the QP type
813  *
814  * This function is called from both rvt_create_qp() and
815  * rvt_reset_qp().   The difference is that the reset path
816  * holds the necessary locks to protect against concurrent
817  * access.
818  */
819 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
820 			enum ib_qp_type type)
821 {
822 	qp->remote_qpn = 0;
823 	qp->qkey = 0;
824 	qp->qp_access_flags = 0;
825 	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
826 	qp->s_hdrwords = 0;
827 	qp->s_wqe = NULL;
828 	qp->s_draining = 0;
829 	qp->s_next_psn = 0;
830 	qp->s_last_psn = 0;
831 	qp->s_sending_psn = 0;
832 	qp->s_sending_hpsn = 0;
833 	qp->s_psn = 0;
834 	qp->r_psn = 0;
835 	qp->r_msn = 0;
836 	if (type == IB_QPT_RC) {
837 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
838 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
839 	} else {
840 		qp->s_state = IB_OPCODE_UC_SEND_LAST;
841 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
842 	}
843 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
844 	qp->r_nak_state = 0;
845 	qp->r_aflags = 0;
846 	qp->r_flags = 0;
847 	qp->s_head = 0;
848 	qp->s_tail = 0;
849 	qp->s_cur = 0;
850 	qp->s_acked = 0;
851 	qp->s_last = 0;
852 	qp->s_ssn = 1;
853 	qp->s_lsn = 0;
854 	qp->s_mig_state = IB_MIG_MIGRATED;
855 	qp->r_head_ack_queue = 0;
856 	qp->s_tail_ack_queue = 0;
857 	qp->s_num_rd_atomic = 0;
858 	if (qp->r_rq.wq) {
859 		qp->r_rq.wq->head = 0;
860 		qp->r_rq.wq->tail = 0;
861 	}
862 	qp->r_sge.num_sge = 0;
863 	atomic_set(&qp->s_reserved_used, 0);
864 }
865 
866 /**
867  * rvt_reset_qp - initialize the QP state to the reset state
868  * @qp: the QP to reset
869  * @type: the QP type
870  *
871  * r_lock, s_hlock, and s_lock are required to be held by the caller
872  */
873 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
874 			 enum ib_qp_type type)
875 	__must_hold(&qp->s_lock)
876 	__must_hold(&qp->s_hlock)
877 	__must_hold(&qp->r_lock)
878 {
879 	lockdep_assert_held(&qp->r_lock);
880 	lockdep_assert_held(&qp->s_hlock);
881 	lockdep_assert_held(&qp->s_lock);
882 	if (qp->state != IB_QPS_RESET) {
883 		qp->state = IB_QPS_RESET;
884 
885 		/* Let drivers flush their waitlist */
886 		rdi->driver_f.flush_qp_waiters(qp);
887 		rvt_stop_rc_timers(qp);
888 		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
889 		spin_unlock(&qp->s_lock);
890 		spin_unlock(&qp->s_hlock);
891 		spin_unlock_irq(&qp->r_lock);
892 
893 		/* Stop the send queue and the retry timer */
894 		rdi->driver_f.stop_send_queue(qp);
895 		rvt_del_timers_sync(qp);
896 		/* Wait for things to stop */
897 		rdi->driver_f.quiesce_qp(qp);
898 
899 		/* take the qp out of the hash and wait for it to be unused */
900 		rvt_remove_qp(rdi, qp);
901 
902 		/* grab the lock b/c it was locked at call time */
903 		spin_lock_irq(&qp->r_lock);
904 		spin_lock(&qp->s_hlock);
905 		spin_lock(&qp->s_lock);
906 
907 		rvt_clear_mr_refs(qp, 1);
908 		/*
909 		 * Let the driver do any tear down or re-init it needs to for
910 		 * a qp that has been reset
911 		 */
912 		rdi->driver_f.notify_qp_reset(qp);
913 	}
914 	rvt_init_qp(rdi, qp, type);
915 	lockdep_assert_held(&qp->r_lock);
916 	lockdep_assert_held(&qp->s_hlock);
917 	lockdep_assert_held(&qp->s_lock);
918 }
919 
920 /** rvt_free_qpn - Free a qpn from the bit map
921  * @qpt: QP table
922  * @qpn: queue pair number to free
923  */
924 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
925 {
926 	struct rvt_qpn_map *map;
927 
928 	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
929 	if (map->page)
930 		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
931 }
932 
933 /**
934  * rvt_create_qp - create a queue pair for a device
935  * @ibpd: the protection domain whose device we create the queue pair for
936  * @init_attr: the attributes of the queue pair
937  * @udata: user data for libibverbs.so
938  *
939  * Queue pair creation is mostly an rvt issue. However, drivers have their own
940  * unique idea of what queue pair numbers mean. For instance there is a reserved
941  * range for PSM.
942  *
943  * Return: the queue pair on success, otherwise returns an errno.
944  *
945  * Called by the ib_create_qp() core verbs function.
946  */
947 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
948 			    struct ib_qp_init_attr *init_attr,
949 			    struct ib_udata *udata)
950 {
951 	struct rvt_qp *qp;
952 	int err;
953 	struct rvt_swqe *swq = NULL;
954 	size_t sz;
955 	size_t sg_list_sz;
956 	struct ib_qp *ret = ERR_PTR(-ENOMEM);
957 	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
958 	void *priv = NULL;
959 	size_t sqsize;
960 
961 	if (!rdi)
962 		return ERR_PTR(-EINVAL);
963 
964 	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
965 	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
966 	    init_attr->create_flags)
967 		return ERR_PTR(-EINVAL);
968 
969 	/* Check receive queue parameters if no SRQ is specified. */
970 	if (!init_attr->srq) {
971 		if (init_attr->cap.max_recv_sge >
972 		    rdi->dparms.props.max_recv_sge ||
973 		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
974 			return ERR_PTR(-EINVAL);
975 
976 		if (init_attr->cap.max_send_sge +
977 		    init_attr->cap.max_send_wr +
978 		    init_attr->cap.max_recv_sge +
979 		    init_attr->cap.max_recv_wr == 0)
980 			return ERR_PTR(-EINVAL);
981 	}
982 	sqsize =
983 		init_attr->cap.max_send_wr + 1 +
984 		rdi->dparms.reserved_operations;
985 	switch (init_attr->qp_type) {
986 	case IB_QPT_SMI:
987 	case IB_QPT_GSI:
988 		if (init_attr->port_num == 0 ||
989 		    init_attr->port_num > ibpd->device->phys_port_cnt)
990 			return ERR_PTR(-EINVAL);
991 		/* fall through */
992 	case IB_QPT_UC:
993 	case IB_QPT_RC:
994 	case IB_QPT_UD:
995 		sz = sizeof(struct rvt_sge) *
996 			init_attr->cap.max_send_sge +
997 			sizeof(struct rvt_swqe);
998 		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
999 		if (!swq)
1000 			return ERR_PTR(-ENOMEM);
1001 
1002 		sz = sizeof(*qp);
1003 		sg_list_sz = 0;
1004 		if (init_attr->srq) {
1005 			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
1006 
1007 			if (srq->rq.max_sge > 1)
1008 				sg_list_sz = sizeof(*qp->r_sg_list) *
1009 					(srq->rq.max_sge - 1);
1010 		} else if (init_attr->cap.max_recv_sge > 1)
1011 			sg_list_sz = sizeof(*qp->r_sg_list) *
1012 				(init_attr->cap.max_recv_sge - 1);
1013 		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
1014 				  rdi->dparms.node);
1015 		if (!qp)
1016 			goto bail_swq;
1017 
1018 		RCU_INIT_POINTER(qp->next, NULL);
1019 		if (init_attr->qp_type == IB_QPT_RC) {
1020 			qp->s_ack_queue =
1021 				kcalloc_node(rvt_max_atomic(rdi),
1022 					     sizeof(*qp->s_ack_queue),
1023 					     GFP_KERNEL,
1024 					     rdi->dparms.node);
1025 			if (!qp->s_ack_queue)
1026 				goto bail_qp;
1027 		}
1028 		/* initialize timers needed for rc qp */
1029 		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
1030 		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
1031 			     HRTIMER_MODE_REL);
1032 		qp->s_rnr_timer.function = rvt_rc_rnr_retry;
1033 
1034 		/*
1035 		 * Driver needs to set up its private QP structure and do any
1036 		 * initialization that is needed.
1037 		 */
1038 		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1039 		if (IS_ERR(priv)) {
1040 			ret = priv;
1041 			goto bail_qp;
1042 		}
1043 		qp->priv = priv;
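		/*
		 * Local ACK timeout per the IB spec: 4.096 usec * 2^timeout.
		 * E.g. (illustrative) timeout == 14 gives 4096 * 2^14 / 1000
		 * ~= 67108 usec, roughly 67 msec, before conversion to
		 * jiffies.
		 */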
1044 		qp->timeout_jiffies =
1045 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1046 				1000UL);
1047 		if (init_attr->srq) {
1048 			sz = 0;
1049 		} else {
1050 			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1051 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1052 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1053 				sizeof(struct rvt_rwqe);
1054 			if (udata)
1055 				qp->r_rq.wq = vmalloc_user(
1056 						sizeof(struct rvt_rwq) +
1057 						qp->r_rq.size * sz);
1058 			else
1059 				qp->r_rq.wq = vzalloc_node(
1060 						sizeof(struct rvt_rwq) +
1061 						qp->r_rq.size * sz,
1062 						rdi->dparms.node);
1063 			if (!qp->r_rq.wq)
1064 				goto bail_driver_priv;
1065 		}
1066 
1067 		/*
1068 		 * ib_create_qp() will initialize qp->ibqp
1069 		 * except for qp->ibqp.qp_num.
1070 		 */
1071 		spin_lock_init(&qp->r_lock);
1072 		spin_lock_init(&qp->s_hlock);
1073 		spin_lock_init(&qp->s_lock);
1074 		spin_lock_init(&qp->r_rq.lock);
1075 		atomic_set(&qp->refcount, 0);
1076 		atomic_set(&qp->local_ops_pending, 0);
1077 		init_waitqueue_head(&qp->wait);
1078 		INIT_LIST_HEAD(&qp->rspwait);
1079 		qp->state = IB_QPS_RESET;
1080 		qp->s_wq = swq;
1081 		qp->s_size = sqsize;
1082 		qp->s_avail = init_attr->cap.max_send_wr;
1083 		qp->s_max_sge = init_attr->cap.max_send_sge;
1084 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1085 			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
1086 
1087 		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1088 				init_attr->qp_type,
1089 				init_attr->port_num);
1090 		if (err < 0) {
1091 			ret = ERR_PTR(err);
1092 			goto bail_rq_wq;
1093 		}
1094 		qp->ibqp.qp_num = err;
1095 		qp->port_num = init_attr->port_num;
1096 		rvt_init_qp(rdi, qp, init_attr->qp_type);
1097 		if (rdi->driver_f.qp_priv_init) {
1098 			err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1099 			if (err) {
1100 				ret = ERR_PTR(err);
1101 				goto bail_rq_wq;
1102 			}
1103 		}
1104 		break;
1105 
1106 	default:
1107 		/* Don't support raw QPs */
1108 		return ERR_PTR(-EINVAL);
1109 	}
1110 
1111 	init_attr->cap.max_inline_data = 0;
1112 
1113 	/*
1114 	 * Return the address of the RWQ as the offset to mmap.
1115 	 * See rvt_mmap() for details.
1116 	 */
1117 	if (udata && udata->outlen >= sizeof(__u64)) {
1118 		if (!qp->r_rq.wq) {
1119 			__u64 offset = 0;
1120 
1121 			err = ib_copy_to_udata(udata, &offset,
1122 					       sizeof(offset));
1123 			if (err) {
1124 				ret = ERR_PTR(err);
1125 				goto bail_qpn;
1126 			}
1127 		} else {
1128 			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
1129 
1130 			qp->ip = rvt_create_mmap_info(rdi, s,
1131 						      ibpd->uobject->context,
1132 						      qp->r_rq.wq);
1133 			if (!qp->ip) {
1134 				ret = ERR_PTR(-ENOMEM);
1135 				goto bail_qpn;
1136 			}
1137 
1138 			err = ib_copy_to_udata(udata, &qp->ip->offset,
1139 					       sizeof(qp->ip->offset));
1140 			if (err) {
1141 				ret = ERR_PTR(err);
1142 				goto bail_ip;
1143 			}
1144 		}
1145 		qp->pid = current->pid;
1146 	}
1147 
1148 	spin_lock(&rdi->n_qps_lock);
1149 	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
1150 		spin_unlock(&rdi->n_qps_lock);
1151 		ret = ERR_PTR(-ENOMEM);
1152 		goto bail_ip;
1153 	}
1154 
1155 	rdi->n_qps_allocated++;
1156 	/*
1157 	 * Maintain a busy_jiffies variable that will be added to the timeout
1158 	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1159 	 * is scaled by the number of rc qps created for the device to reduce
1160 	 * the number of timeouts occurring when there is a large number of
1161 	 * qps. busy_jiffies is incremented every rc qp scaling interval.
1162 	 * The scaling interval is selected based on extensive performance
1163 	 * evaluation of targeted workloads.
1164 	 */
1165 	if (init_attr->qp_type == IB_QPT_RC) {
1166 		rdi->n_rc_qps++;
1167 		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1168 	}
1169 	spin_unlock(&rdi->n_qps_lock);
1170 
1171 	if (qp->ip) {
1172 		spin_lock_irq(&rdi->pending_lock);
1173 		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1174 		spin_unlock_irq(&rdi->pending_lock);
1175 	}
1176 
1177 	ret = &qp->ibqp;
1178 
1179 	/*
1180 	 * We have our QP and it's good, now keep track of what types of opcodes
1181 	 * can be processed on this QP. We do this by keeping track of what the
1182 	 * 3 high order bits of the opcode are.
1183 	 */
1184 	switch (init_attr->qp_type) {
1185 	case IB_QPT_SMI:
1186 	case IB_QPT_GSI:
1187 	case IB_QPT_UD:
1188 		qp->allowed_ops = IB_OPCODE_UD;
1189 		break;
1190 	case IB_QPT_RC:
1191 		qp->allowed_ops = IB_OPCODE_RC;
1192 		break;
1193 	case IB_QPT_UC:
1194 		qp->allowed_ops = IB_OPCODE_UC;
1195 		break;
1196 	default:
1197 		ret = ERR_PTR(-EINVAL);
1198 		goto bail_ip;
1199 	}
1200 
1201 	return ret;
1202 
1203 bail_ip:
1204 	if (qp->ip)
1205 		kref_put(&qp->ip->ref, rvt_release_mmap_info);
1206 
1207 bail_qpn:
1208 	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1209 
1210 bail_rq_wq:
1211 	if (!qp->ip)
1212 		vfree(qp->r_rq.wq);
1213 
1214 bail_driver_priv:
1215 	rdi->driver_f.qp_priv_free(rdi, qp);
1216 
1217 bail_qp:
1218 	kfree(qp->s_ack_queue);
1219 	kfree(qp);
1220 
1221 bail_swq:
1222 	vfree(swq);
1223 
1224 	return ret;
1225 }
1226 
1227 /**
1228  * rvt_error_qp - put a QP into the error state
1229  * @qp: the QP to put into the error state
1230  * @err: the receive completion error to signal if a RWQE is active
1231  *
1232  * Flushes both send and receive work queues.
1233  *
1234  * Return: true if last WQE event should be generated.
1235  * The QP r_lock and s_lock should be held and interrupts disabled.
1236  * If we are already in error state, just return.
1237  */
1238 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1239 {
1240 	struct ib_wc wc;
1241 	int ret = 0;
1242 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1243 
1244 	lockdep_assert_held(&qp->r_lock);
1245 	lockdep_assert_held(&qp->s_lock);
1246 	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1247 		goto bail;
1248 
1249 	qp->state = IB_QPS_ERR;
1250 
1251 	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1252 		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1253 		del_timer(&qp->s_timer);
1254 	}
1255 
1256 	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1257 		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1258 
1259 	rdi->driver_f.notify_error_qp(qp);
1260 
1261 	/* Schedule the sending tasklet to drain the send work queue. */
1262 	if (READ_ONCE(qp->s_last) != qp->s_head)
1263 		rdi->driver_f.schedule_send(qp);
1264 
1265 	rvt_clear_mr_refs(qp, 0);
1266 
1267 	memset(&wc, 0, sizeof(wc));
1268 	wc.qp = &qp->ibqp;
1269 	wc.opcode = IB_WC_RECV;
1270 
1271 	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1272 		wc.wr_id = qp->r_wr_id;
1273 		wc.status = err;
1274 		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1275 	}
1276 	wc.status = IB_WC_WR_FLUSH_ERR;
1277 
1278 	if (qp->r_rq.wq) {
1279 		struct rvt_rwq *wq;
1280 		u32 head;
1281 		u32 tail;
1282 
1283 		spin_lock(&qp->r_rq.lock);
1284 
1285 		/* sanity check pointers before trusting them */
1286 		wq = qp->r_rq.wq;
1287 		head = wq->head;
1288 		if (head >= qp->r_rq.size)
1289 			head = 0;
1290 		tail = wq->tail;
1291 		if (tail >= qp->r_rq.size)
1292 			tail = 0;
1293 		while (tail != head) {
1294 			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1295 			if (++tail >= qp->r_rq.size)
1296 				tail = 0;
1297 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1298 		}
1299 		wq->tail = tail;
1300 
1301 		spin_unlock(&qp->r_rq.lock);
1302 	} else if (qp->ibqp.event_handler) {
1303 		ret = 1;
1304 	}
1305 
1306 bail:
1307 	return ret;
1308 }
1309 EXPORT_SYMBOL(rvt_error_qp);
1310 
1311 /*
1312  * Put the QP into the hash table.
1313  * The hash table holds a reference to the QP.
1314  */
1315 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1316 {
1317 	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1318 	unsigned long flags;
1319 
1320 	rvt_get_qp(qp);
1321 	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1322 
1323 	if (qp->ibqp.qp_num <= 1) {
1324 		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1325 	} else {
1326 		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1327 
1328 		qp->next = rdi->qp_dev->qp_table[n];
1329 		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1330 		trace_rvt_qpinsert(qp, n);
1331 	}
1332 
1333 	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1334 }
1335 
1336 /**
1337  * rvt_modify_qp - modify the attributes of a queue pair
1338  * @ibqp: the queue pair whose attributes we're modifying
1339  * @attr: the new attributes
1340  * @attr_mask: the mask of attributes to modify
1341  * @udata: user data for libibverbs.so
1342  *
1343  * Return: 0 on success, otherwise returns an errno.
1344  */
1345 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1346 		  int attr_mask, struct ib_udata *udata)
1347 {
1348 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1349 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1350 	enum ib_qp_state cur_state, new_state;
1351 	struct ib_event ev;
1352 	int lastwqe = 0;
1353 	int mig = 0;
1354 	int pmtu = 0; /* for gcc warning only */
1355 	int opa_ah;
1356 
1357 	spin_lock_irq(&qp->r_lock);
1358 	spin_lock(&qp->s_hlock);
1359 	spin_lock(&qp->s_lock);
1360 
1361 	cur_state = attr_mask & IB_QP_CUR_STATE ?
1362 		attr->cur_qp_state : qp->state;
1363 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1364 	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1365 
1366 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1367 				attr_mask))
1368 		goto inval;
1369 
1370 	if (rdi->driver_f.check_modify_qp &&
1371 	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1372 		goto inval;
1373 
1374 	if (attr_mask & IB_QP_AV) {
1375 		if (opa_ah) {
1376 			if (rdma_ah_get_dlid(&attr->ah_attr) >=
1377 				opa_get_mcast_base(OPA_MCAST_NR))
1378 				goto inval;
1379 		} else {
1380 			if (rdma_ah_get_dlid(&attr->ah_attr) >=
1381 				be16_to_cpu(IB_MULTICAST_LID_BASE))
1382 				goto inval;
1383 		}
1384 
1385 		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1386 			goto inval;
1387 	}
1388 
1389 	if (attr_mask & IB_QP_ALT_PATH) {
1390 		if (opa_ah) {
1391 			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1392 				opa_get_mcast_base(OPA_MCAST_NR))
1393 				goto inval;
1394 		} else {
1395 			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1396 				be16_to_cpu(IB_MULTICAST_LID_BASE))
1397 				goto inval;
1398 		}
1399 
1400 		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1401 			goto inval;
1402 		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1403 			goto inval;
1404 	}
1405 
1406 	if (attr_mask & IB_QP_PKEY_INDEX)
1407 		if (attr->pkey_index >= rvt_get_npkeys(rdi))
1408 			goto inval;
1409 
1410 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
1411 		if (attr->min_rnr_timer > 31)
1412 			goto inval;
1413 
1414 	if (attr_mask & IB_QP_PORT)
1415 		if (qp->ibqp.qp_type == IB_QPT_SMI ||
1416 		    qp->ibqp.qp_type == IB_QPT_GSI ||
1417 		    attr->port_num == 0 ||
1418 		    attr->port_num > ibqp->device->phys_port_cnt)
1419 			goto inval;
1420 
1421 	if (attr_mask & IB_QP_DEST_QPN)
1422 		if (attr->dest_qp_num > RVT_QPN_MASK)
1423 			goto inval;
1424 
1425 	if (attr_mask & IB_QP_RETRY_CNT)
1426 		if (attr->retry_cnt > 7)
1427 			goto inval;
1428 
1429 	if (attr_mask & IB_QP_RNR_RETRY)
1430 		if (attr->rnr_retry > 7)
1431 			goto inval;
1432 
1433 	/*
1434 	 * Don't allow invalid path_mtu values.  OK to set greater
1435 	 * than the active mtu (or even the max_cap, if we have tuned
1436 	 * that to a small mtu).  We'll set qp->path_mtu
1437 	 * to the lesser of requested attribute mtu and active,
1438 	 * for packetizing messages.
1439 	 * Note that the QP port has to be set in INIT and MTU in RTR.
1440 	 */
1441 	if (attr_mask & IB_QP_PATH_MTU) {
1442 		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1443 		if (pmtu < 0)
1444 			goto inval;
1445 	}
1446 
1447 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
1448 		if (attr->path_mig_state == IB_MIG_REARM) {
1449 			if (qp->s_mig_state == IB_MIG_ARMED)
1450 				goto inval;
1451 			if (new_state != IB_QPS_RTS)
1452 				goto inval;
1453 		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1454 			if (qp->s_mig_state == IB_MIG_REARM)
1455 				goto inval;
1456 			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1457 				goto inval;
1458 			if (qp->s_mig_state == IB_MIG_ARMED)
1459 				mig = 1;
1460 		} else {
1461 			goto inval;
1462 		}
1463 	}
1464 
1465 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1466 		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1467 			goto inval;
1468 
1469 	switch (new_state) {
1470 	case IB_QPS_RESET:
1471 		if (qp->state != IB_QPS_RESET)
1472 			rvt_reset_qp(rdi, qp, ibqp->qp_type);
1473 		break;
1474 
1475 	case IB_QPS_RTR:
1476 		/* Allow event to re-trigger if QP set to RTR more than once */
1477 		qp->r_flags &= ~RVT_R_COMM_EST;
1478 		qp->state = new_state;
1479 		break;
1480 
1481 	case IB_QPS_SQD:
1482 		qp->s_draining = qp->s_last != qp->s_cur;
1483 		qp->state = new_state;
1484 		break;
1485 
1486 	case IB_QPS_SQE:
1487 		if (qp->ibqp.qp_type == IB_QPT_RC)
1488 			goto inval;
1489 		qp->state = new_state;
1490 		break;
1491 
1492 	case IB_QPS_ERR:
1493 		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1494 		break;
1495 
1496 	default:
1497 		qp->state = new_state;
1498 		break;
1499 	}
1500 
1501 	if (attr_mask & IB_QP_PKEY_INDEX)
1502 		qp->s_pkey_index = attr->pkey_index;
1503 
1504 	if (attr_mask & IB_QP_PORT)
1505 		qp->port_num = attr->port_num;
1506 
1507 	if (attr_mask & IB_QP_DEST_QPN)
1508 		qp->remote_qpn = attr->dest_qp_num;
1509 
1510 	if (attr_mask & IB_QP_SQ_PSN) {
1511 		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1512 		qp->s_psn = qp->s_next_psn;
1513 		qp->s_sending_psn = qp->s_next_psn;
1514 		qp->s_last_psn = qp->s_next_psn - 1;
1515 		qp->s_sending_hpsn = qp->s_last_psn;
1516 	}
1517 
1518 	if (attr_mask & IB_QP_RQ_PSN)
1519 		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1520 
1521 	if (attr_mask & IB_QP_ACCESS_FLAGS)
1522 		qp->qp_access_flags = attr->qp_access_flags;
1523 
1524 	if (attr_mask & IB_QP_AV) {
1525 		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1526 		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1527 		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1528 	}
1529 
1530 	if (attr_mask & IB_QP_ALT_PATH) {
1531 		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1532 		qp->s_alt_pkey_index = attr->alt_pkey_index;
1533 	}
1534 
1535 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
1536 		qp->s_mig_state = attr->path_mig_state;
1537 		if (mig) {
1538 			qp->remote_ah_attr = qp->alt_ah_attr;
1539 			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1540 			qp->s_pkey_index = qp->s_alt_pkey_index;
1541 		}
1542 	}
1543 
1544 	if (attr_mask & IB_QP_PATH_MTU) {
1545 		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1546 		qp->log_pmtu = ilog2(qp->pmtu);
1547 	}
1548 
1549 	if (attr_mask & IB_QP_RETRY_CNT) {
1550 		qp->s_retry_cnt = attr->retry_cnt;
1551 		qp->s_retry = attr->retry_cnt;
1552 	}
1553 
1554 	if (attr_mask & IB_QP_RNR_RETRY) {
1555 		qp->s_rnr_retry_cnt = attr->rnr_retry;
1556 		qp->s_rnr_retry = attr->rnr_retry;
1557 	}
1558 
1559 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
1560 		qp->r_min_rnr_timer = attr->min_rnr_timer;
1561 
1562 	if (attr_mask & IB_QP_TIMEOUT) {
1563 		qp->timeout = attr->timeout;
1564 		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1565 	}
1566 
1567 	if (attr_mask & IB_QP_QKEY)
1568 		qp->qkey = attr->qkey;
1569 
1570 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1571 		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1572 
1573 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1574 		qp->s_max_rd_atomic = attr->max_rd_atomic;
1575 
1576 	if (rdi->driver_f.modify_qp)
1577 		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1578 
1579 	spin_unlock(&qp->s_lock);
1580 	spin_unlock(&qp->s_hlock);
1581 	spin_unlock_irq(&qp->r_lock);
1582 
1583 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1584 		rvt_insert_qp(rdi, qp);
1585 
1586 	if (lastwqe) {
1587 		ev.device = qp->ibqp.device;
1588 		ev.element.qp = &qp->ibqp;
1589 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1590 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1591 	}
1592 	if (mig) {
1593 		ev.device = qp->ibqp.device;
1594 		ev.element.qp = &qp->ibqp;
1595 		ev.event = IB_EVENT_PATH_MIG;
1596 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1597 	}
1598 	return 0;
1599 
1600 inval:
1601 	spin_unlock(&qp->s_lock);
1602 	spin_unlock(&qp->s_hlock);
1603 	spin_unlock_irq(&qp->r_lock);
1604 	return -EINVAL;
1605 }
1606 
1607 /**
1608  * rvt_destroy_qp - destroy a queue pair
1609  * @ibqp: the queue pair to destroy
1610  *
1611  * Note that this can be called while the QP is actively sending or
1612  * receiving!
1613  *
1614  * Return: 0 on success.
1615  */
1616 int rvt_destroy_qp(struct ib_qp *ibqp)
1617 {
1618 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1619 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1620 
1621 	spin_lock_irq(&qp->r_lock);
1622 	spin_lock(&qp->s_hlock);
1623 	spin_lock(&qp->s_lock);
1624 	rvt_reset_qp(rdi, qp, ibqp->qp_type);
1625 	spin_unlock(&qp->s_lock);
1626 	spin_unlock(&qp->s_hlock);
1627 	spin_unlock_irq(&qp->r_lock);
1628 
1629 	wait_event(qp->wait, !atomic_read(&qp->refcount));
1630 	/* qpn is now available for use again */
1631 	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1632 
1633 	spin_lock(&rdi->n_qps_lock);
1634 	rdi->n_qps_allocated--;
1635 	if (qp->ibqp.qp_type == IB_QPT_RC) {
1636 		rdi->n_rc_qps--;
1637 		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1638 	}
1639 	spin_unlock(&rdi->n_qps_lock);
1640 
1641 	if (qp->ip)
1642 		kref_put(&qp->ip->ref, rvt_release_mmap_info);
1643 	else
1644 		vfree(qp->r_rq.wq);
1645 	vfree(qp->s_wq);
1646 	rdi->driver_f.qp_priv_free(rdi, qp);
1647 	kfree(qp->s_ack_queue);
1648 	rdma_destroy_ah_attr(&qp->remote_ah_attr);
1649 	rdma_destroy_ah_attr(&qp->alt_ah_attr);
1650 	kfree(qp);
1651 	return 0;
1652 }
1653 
1654 /**
1655  * rvt_query_qp - query an ibqp
1656  * @ibqp: IB qp to query
1657  * @attr: attr struct to fill in
1658  * @attr_mask: attr mask ignored
1659  * @init_attr: struct to fill in
1660  *
1661  * Return: always 0
1662  */
1663 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1664 		 int attr_mask, struct ib_qp_init_attr *init_attr)
1665 {
1666 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1667 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1668 
1669 	attr->qp_state = qp->state;
1670 	attr->cur_qp_state = attr->qp_state;
1671 	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1672 	attr->path_mig_state = qp->s_mig_state;
1673 	attr->qkey = qp->qkey;
1674 	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1675 	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1676 	attr->dest_qp_num = qp->remote_qpn;
1677 	attr->qp_access_flags = qp->qp_access_flags;
1678 	attr->cap.max_send_wr = qp->s_size - 1 -
1679 		rdi->dparms.reserved_operations;
1680 	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1681 	attr->cap.max_send_sge = qp->s_max_sge;
1682 	attr->cap.max_recv_sge = qp->r_rq.max_sge;
1683 	attr->cap.max_inline_data = 0;
1684 	attr->ah_attr = qp->remote_ah_attr;
1685 	attr->alt_ah_attr = qp->alt_ah_attr;
1686 	attr->pkey_index = qp->s_pkey_index;
1687 	attr->alt_pkey_index = qp->s_alt_pkey_index;
1688 	attr->en_sqd_async_notify = 0;
1689 	attr->sq_draining = qp->s_draining;
1690 	attr->max_rd_atomic = qp->s_max_rd_atomic;
1691 	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1692 	attr->min_rnr_timer = qp->r_min_rnr_timer;
1693 	attr->port_num = qp->port_num;
1694 	attr->timeout = qp->timeout;
1695 	attr->retry_cnt = qp->s_retry_cnt;
1696 	attr->rnr_retry = qp->s_rnr_retry_cnt;
1697 	attr->alt_port_num =
1698 		rdma_ah_get_port_num(&qp->alt_ah_attr);
1699 	attr->alt_timeout = qp->alt_timeout;
1700 
1701 	init_attr->event_handler = qp->ibqp.event_handler;
1702 	init_attr->qp_context = qp->ibqp.qp_context;
1703 	init_attr->send_cq = qp->ibqp.send_cq;
1704 	init_attr->recv_cq = qp->ibqp.recv_cq;
1705 	init_attr->srq = qp->ibqp.srq;
1706 	init_attr->cap = attr->cap;
1707 	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1708 		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1709 	else
1710 		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1711 	init_attr->qp_type = qp->ibqp.qp_type;
1712 	init_attr->port_num = qp->port_num;
1713 	return 0;
1714 }
1715 
1716 /**
1717  * rvt_post_recv - post a receive on a QP
1718  * @ibqp: the QP to post the receive on
1719  * @wr: the WR to post
1720  * @bad_wr: the first bad WR is put here
1721  *
1722  * This may be called from interrupt context.
1723  *
1724  * Return: 0 on success otherwise errno
1725  */
1726 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1727 		  const struct ib_recv_wr **bad_wr)
1728 {
1729 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1730 	struct rvt_rwq *wq = qp->r_rq.wq;
1731 	unsigned long flags;
1732 	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1733 				!qp->ibqp.srq;
1734 
1735 	/* Check that state is OK to post receive. */
1736 	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1737 		*bad_wr = wr;
1738 		return -EINVAL;
1739 	}
1740 
1741 	for (; wr; wr = wr->next) {
1742 		struct rvt_rwqe *wqe;
1743 		u32 next;
1744 		int i;
1745 
1746 		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1747 			*bad_wr = wr;
1748 			return -EINVAL;
1749 		}
1750 
1751 		spin_lock_irqsave(&qp->r_rq.lock, flags);
1752 		next = wq->head + 1;
1753 		if (next >= qp->r_rq.size)
1754 			next = 0;
1755 		if (next == wq->tail) {
1756 			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1757 			*bad_wr = wr;
1758 			return -ENOMEM;
1759 		}
1760 		if (unlikely(qp_err_flush)) {
1761 			struct ib_wc wc;
1762 
1763 			memset(&wc, 0, sizeof(wc));
1764 			wc.qp = &qp->ibqp;
1765 			wc.opcode = IB_WC_RECV;
1766 			wc.wr_id = wr->wr_id;
1767 			wc.status = IB_WC_WR_FLUSH_ERR;
1768 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1769 		} else {
1770 			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1771 			wqe->wr_id = wr->wr_id;
1772 			wqe->num_sge = wr->num_sge;
1773 			for (i = 0; i < wr->num_sge; i++)
1774 				wqe->sg_list[i] = wr->sg_list[i];
1775 			/*
1776 			 * Make sure queue entry is written
1777 			 * before the head index.
1778 			 */
1779 			smp_wmb();
1780 			wq->head = next;
1781 		}
1782 		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1783 	}
1784 	return 0;
1785 }
1786 
1787 /**
1788  * rvt_qp_valid_operation - validate post send wr request
1789  * @qp - the qp
1790  * @post_parms - the post send table for the driver
1791  * @wr - the work request
1792  *
1793  * The routine validates the operation based on the
1794  * validation table and returns the length of the operation
1795  * which can extend beyond the ib_send_wr.  Operation
1796  * dependent flags key atomic operation validation.
1797  *
1798  * There is an exception for UD qps that validates the pd and
1799  * overrides the length to include the additional UD specific
1800  * length.
1801  *
1802  * Returns a negative error or the length of the work request
1803  * for building the swqe.
1804  */
1805 static inline int rvt_qp_valid_operation(
1806 	struct rvt_qp *qp,
1807 	const struct rvt_operation_params *post_parms,
1808 	const struct ib_send_wr *wr)
1809 {
1810 	int len;
1811 
1812 	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1813 		return -EINVAL;
1814 	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1815 		return -EINVAL;
1816 	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1817 	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
1818 		return -EINVAL;
1819 	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1820 	    (wr->num_sge == 0 ||
1821 	     wr->sg_list[0].length < sizeof(u64) ||
1822 	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
1823 		return -EINVAL;
1824 	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1825 	    !qp->s_max_rd_atomic)
1826 		return -EINVAL;
1827 	len = post_parms[wr->opcode].length;
1828 	/* UD specific */
1829 	if (qp->ibqp.qp_type != IB_QPT_UC &&
1830 	    qp->ibqp.qp_type != IB_QPT_RC) {
1831 		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1832 			return -EINVAL;
1833 		len = sizeof(struct ib_ud_wr);
1834 	}
1835 	return len;
1836 }
1837 
1838 /**
1839  * rvt_qp_is_avail - determine queue capacity
1840  * @qp: the qp
1841  * @rdi: the rdmavt device
1842  * @reserved_op: is reserved operation
1843  *
1844  * This assumes the s_hlock is held but the s_last
1845  * qp variable is uncontrolled.
1846  *
1847  * For non reserved operations, the qp->s_avail
1848  * may be changed.
1849  *
1850  * The return value is zero or -ENOMEM.
1851  */
1852 static inline int rvt_qp_is_avail(
1853 	struct rvt_qp *qp,
1854 	struct rvt_dev_info *rdi,
1855 	bool reserved_op)
1856 {
1857 	u32 slast;
1858 	u32 avail;
1859 	u32 reserved_used;
1860 
1861 	/* see rvt_qp_wqe_unreserve() */
1862 	smp_mb__before_atomic();
1863 	reserved_used = atomic_read(&qp->s_reserved_used);
1864 	if (unlikely(reserved_op)) {
1865 		/* see rvt_qp_wqe_unreserve() */
1866 		smp_mb__before_atomic();
1867 		if (reserved_used >= rdi->dparms.reserved_operations)
1868 			return -ENOMEM;
1869 		return 0;
1870 	}
1871 	/* non-reserved operations */
1872 	if (likely(qp->s_avail))
1873 		return 0;
1874 	slast = READ_ONCE(qp->s_last);
1875 	if (qp->s_head >= slast)
1876 		avail = qp->s_size - (qp->s_head - slast);
1877 	else
1878 		avail = slast - qp->s_head;
1879 
1880 	/* see rvt_qp_wqe_unreserve() */
1881 	smp_mb__before_atomic();
1882 	reserved_used = atomic_read(&qp->s_reserved_used);
1883 	avail =  avail - 1 -
1884 		(rdi->dparms.reserved_operations - reserved_used);
1885 	/* ensure we don't assign a negative s_avail */
1886 	if ((s32)avail <= 0)
1887 		return -ENOMEM;
1888 	qp->s_avail = avail;
1889 	if (WARN_ON(qp->s_avail >
1890 		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1891 		rvt_pr_err(rdi,
1892 			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1893 			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1894 			   qp->s_head, qp->s_tail, qp->s_cur,
1895 			   qp->s_acked, qp->s_last);
1896 	return 0;
1897 }
1898 
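/*
 * Worked example of the availability math above (illustrative numbers,
 * not taken from this file): with s_size = 16, s_head = 10, s_last = 4,
 * dparms.reserved_operations = 2 and s_reserved_used = 1, the ring has
 * s_size - (s_head - s_last) = 10 free slots.  One slot is always kept
 * back to distinguish full from empty, and one not-yet-consumed reserved
 * slot is set aside, so qp->s_avail becomes 10 - 1 - (2 - 1) = 8.
 */
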
1899 /**
1900  * rvt_post_one_wr - post one RC, UC, or UD send work request
1901  * @qp: the QP to post on
1902  * @wr: the work request to send
1903  */
1904 static int rvt_post_one_wr(struct rvt_qp *qp,
1905 			   const struct ib_send_wr *wr,
1906 			   bool *call_send)
1907 {
1908 	struct rvt_swqe *wqe;
1909 	u32 next;
1910 	int i;
1911 	int j;
1912 	int acc;
1913 	struct rvt_lkey_table *rkt;
1914 	struct rvt_pd *pd;
1915 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1916 	u8 log_pmtu;
1917 	int ret;
1918 	size_t cplen;
1919 	bool reserved_op;
1920 	int local_ops_delayed = 0;
1921 
1922 	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
1923 
1924 	/* IB spec says that num_sge == 0 is OK. */
1925 	if (unlikely(wr->num_sge > qp->s_max_sge))
1926 		return -EINVAL;
1927 
1928 	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1929 	if (ret < 0)
1930 		return ret;
1931 	cplen = ret;
1932 
1933 	/*
1934 	 * Local operations include fast register and local invalidate.
1935 	 * Fast register needs to be processed immediately because the
1936 	 * registered lkey may be used by following work requests and the
1937 	 * lkey needs to be valid at the time those requests are posted.
1938 	 * Local invalidate can be processed immediately if fencing is
1939 	 * not required and no previous local invalidate ops are pending.
1940 	 * Signaled local operations that have been processed immediately
1941 	 * need to have requests with "completion only" flags set posted
1942 	 * to the send queue in order to generate completions.
1943 	 */
1944 	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
1945 		switch (wr->opcode) {
1946 		case IB_WR_REG_MR:
1947 			ret = rvt_fast_reg_mr(qp,
1948 					      reg_wr(wr)->mr,
1949 					      reg_wr(wr)->key,
1950 					      reg_wr(wr)->access);
1951 			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1952 				return ret;
1953 			break;
1954 		case IB_WR_LOCAL_INV:
1955 			if ((wr->send_flags & IB_SEND_FENCE) ||
1956 			    atomic_read(&qp->local_ops_pending)) {
1957 				local_ops_delayed = 1;
1958 			} else {
1959 				ret = rvt_invalidate_rkey(
1960 					qp, wr->ex.invalidate_rkey);
1961 				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1962 					return ret;
1963 			}
1964 			break;
1965 		default:
1966 			return -EINVAL;
1967 		}
1968 	}
1969 
1970 	reserved_op = rdi->post_parms[wr->opcode].flags &
1971 			RVT_OPERATION_USE_RESERVE;
1972 	/* check for avail */
1973 	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
1974 	if (ret)
1975 		return ret;
1976 	next = qp->s_head + 1;
1977 	if (next >= qp->s_size)
1978 		next = 0;
1979 
1980 	rkt = &rdi->lkey_table;
1981 	pd = ibpd_to_rvtpd(qp->ibqp.pd);
1982 	wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1983 
1984 	/* cplen has length from above */
1985 	memcpy(&wqe->wr, wr, cplen);
1986 
1987 	wqe->length = 0;
1988 	j = 0;
1989 	if (wr->num_sge) {
1990 		struct rvt_sge *last_sge = NULL;
1991 
1992 		acc = wr->opcode >= IB_WR_RDMA_READ ?
1993 			IB_ACCESS_LOCAL_WRITE : 0;
1994 		for (i = 0; i < wr->num_sge; i++) {
1995 			u32 length = wr->sg_list[i].length;
1996 
1997 			if (length == 0)
1998 				continue;
1999 			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2000 					  &wr->sg_list[i], acc);
2001 			if (unlikely(ret < 0))
2002 				goto bail_inval_free;
2003 			wqe->length += length;
2004 			if (ret)
2005 				last_sge = &wqe->sg_list[j];
2006 			j += ret;
2007 		}
2008 		wqe->wr.num_sge = j;
2009 	}
2010 
2011 	/*
2012 	 * Calculate and set SWQE PSN values prior to handing it off
2013 	 * to the driver's check routine. This give the driver the
2014 	 * to the driver's check routine. This gives the driver the
2015 	 */
2016 	log_pmtu = qp->log_pmtu;
2017 	if (qp->ibqp.qp_type != IB_QPT_UC &&
2018 	    qp->ibqp.qp_type != IB_QPT_RC) {
2019 		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
2020 
2021 		log_pmtu = ah->log_pmtu;
2022 		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
2023 	}
2024 
2025 	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2026 		if (local_ops_delayed)
2027 			atomic_inc(&qp->local_ops_pending);
2028 		else
2029 			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2030 		wqe->ssn = 0;
2031 		wqe->psn = 0;
2032 		wqe->lpsn = 0;
2033 	} else {
2034 		wqe->ssn = qp->s_ssn++;
2035 		wqe->psn = qp->s_next_psn;
2036 		wqe->lpsn = wqe->psn +
2037 				(wqe->length ?
2038 					((wqe->length - 1) >> log_pmtu) :
2039 					0);
2040 	}
2041 
2042 	/* general part of wqe valid - allow for driver checks */
2043 	if (rdi->driver_f.setup_wqe) {
2044 		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2045 		if (ret < 0)
2046 			goto bail_inval_free_ref;
2047 	}
2048 
2049 	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2050 		qp->s_next_psn = wqe->lpsn + 1;
2051 
2052 	if (unlikely(reserved_op)) {
2053 		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2054 		rvt_qp_wqe_reserve(qp, wqe);
2055 	} else {
2056 		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2057 		qp->s_avail--;
2058 	}
2059 	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2060 	smp_wmb(); /* see request builders */
2061 	qp->s_head = next;
2062 
2063 	return 0;
2064 
2065 bail_inval_free_ref:
2066 	if (qp->ibqp.qp_type != IB_QPT_UC &&
2067 	    qp->ibqp.qp_type != IB_QPT_RC)
2068 		atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
2069 bail_inval_free:
2070 	/* release mr holds */
2071 	while (j) {
2072 		struct rvt_sge *sge = &wqe->sg_list[--j];
2073 
2074 		rvt_put_mr(sge->mr);
2075 	}
2076 	return ret;
2077 }
2078 
2079 /**
2080  * rvt_post_send - post a send on a QP
2081  * @ibqp: the QP to post the send on
2082  * @wr: the list of work requests to post
2083  * @bad_wr: the first bad WR is put here
2084  *
2085  * This may be called from interrupt context.
2086  *
2087  * Return: 0 on success else errno
2088  */
2089 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2090 		  const struct ib_send_wr **bad_wr)
2091 {
2092 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2093 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2094 	unsigned long flags = 0;
2095 	bool call_send;
2096 	unsigned nreq = 0;
2097 	int err = 0;
2098 
2099 	spin_lock_irqsave(&qp->s_hlock, flags);
2100 
2101 	/*
2102 	 * Ensure QP state is such that we can send. If not bail out early,
2103 	 * there is no need to do this every time we post a send.
2104 	 */
2105 	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2106 		spin_unlock_irqrestore(&qp->s_hlock, flags);
2107 		return -EINVAL;
2108 	}
2109 
2110 	/*
2111 	 * If the send queue is empty and we only have a single WR, just go
2112 	 * ahead and kick the send engine into gear. Otherwise we will always
2113 	 * just schedule the send to happen later.
2114 	 */
2115 	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2116 
2117 	for (; wr; wr = wr->next) {
2118 		err = rvt_post_one_wr(qp, wr, &call_send);
2119 		if (unlikely(err)) {
2120 			*bad_wr = wr;
2121 			goto bail;
2122 		}
2123 		nreq++;
2124 	}
2125 bail:
2126 	spin_unlock_irqrestore(&qp->s_hlock, flags);
2127 	if (nreq) {
2128 		/*
2129 		 * Only call do_send if there is exactly one packet, and the
2130 		 * driver said it was ok.
2131 		 */
2132 		if (nreq == 1 && call_send)
2133 			rdi->driver_f.do_send(qp);
2134 		else
2135 			rdi->driver_f.schedule_send_no_lock(qp);
2136 	}
2137 	return err;
2138 }
2139 
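/*
 * Illustrative caller sketch (not part of this file): ULPs normally reach
 * rvt_post_send() through ib_post_send().  The dma_addr/len/lkey values,
 * the wr_id and the handle_post_error() helper below are placeholders.
 *
 *	struct ib_send_wr wr = { };
 *	const struct ib_send_wr *bad_wr;
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *
 *	wr.wr_id = 1;
 *	wr.opcode = IB_WR_SEND;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		handle_post_error(bad_wr);
 */
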
2140 /**
2141  * rvt_post_srq_receive - post a receive on a shared receive queue
2142  * @ibsrq: the SRQ to post the receive on
2143  * @wr: the list of work requests to post
2144  * @bad_wr: A pointer to the first WR to cause a problem is put here
2145  *
2146  * This may be called from interrupt context.
2147  *
2148  * Return: 0 on success else errno
2149  */
2150 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2151 		      const struct ib_recv_wr **bad_wr)
2152 {
2153 	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2154 	struct rvt_rwq *wq;
2155 	unsigned long flags;
2156 
2157 	for (; wr; wr = wr->next) {
2158 		struct rvt_rwqe *wqe;
2159 		u32 next;
2160 		int i;
2161 
2162 		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2163 			*bad_wr = wr;
2164 			return -EINVAL;
2165 		}
2166 
2167 		spin_lock_irqsave(&srq->rq.lock, flags);
2168 		wq = srq->rq.wq;
2169 		next = wq->head + 1;
2170 		if (next >= srq->rq.size)
2171 			next = 0;
2172 		if (next == wq->tail) {
2173 			spin_unlock_irqrestore(&srq->rq.lock, flags);
2174 			*bad_wr = wr;
2175 			return -ENOMEM;
2176 		}
2177 
2178 		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2179 		wqe->wr_id = wr->wr_id;
2180 		wqe->num_sge = wr->num_sge;
2181 		for (i = 0; i < wr->num_sge; i++)
2182 			wqe->sg_list[i] = wr->sg_list[i];
2183 		/* Make sure queue entry is written before the head index. */
2184 		smp_wmb();
2185 		wq->head = next;
2186 		spin_unlock_irqrestore(&srq->rq.lock, flags);
2187 	}
2188 	return 0;
2189 }
2190 
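/*
 * Illustrative sketch (not from this file): consumers reach
 * rvt_post_srq_recv() through ib_post_srq_recv().  The buffer fields
 * below are placeholders.
 *
 *	struct ib_recv_wr rwr = { };
 *	const struct ib_recv_wr *bad_wr;
 *	struct ib_sge sge = { .addr = buf_dma, .length = buf_len, .lkey = lkey };
 *
 *	rwr.wr_id = (uintptr_t)buf_cookie;
 *	rwr.sg_list = &sge;
 *	rwr.num_sge = 1;
 *	if (ib_post_srq_recv(srq, &rwr, &bad_wr))
 *		pr_err("SRQ post failed at wr_id %llu\n", bad_wr->wr_id);
 */
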
2191 /*
2192  * Validate a RWQE and fill in the SGE state.
2193  * Return 1 if OK.
2194  */
2195 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2196 {
2197 	int i, j, ret;
2198 	struct ib_wc wc;
2199 	struct rvt_lkey_table *rkt;
2200 	struct rvt_pd *pd;
2201 	struct rvt_sge_state *ss;
2202 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2203 
2204 	rkt = &rdi->lkey_table;
2205 	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2206 	ss = &qp->r_sge;
2207 	ss->sg_list = qp->r_sg_list;
2208 	qp->r_len = 0;
2209 	for (i = j = 0; i < wqe->num_sge; i++) {
2210 		if (wqe->sg_list[i].length == 0)
2211 			continue;
2212 		/* Check LKEY */
2213 		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2214 				  NULL, &wqe->sg_list[i],
2215 				  IB_ACCESS_LOCAL_WRITE);
2216 		if (unlikely(ret <= 0))
2217 			goto bad_lkey;
2218 		qp->r_len += wqe->sg_list[i].length;
2219 		j++;
2220 	}
2221 	ss->num_sge = j;
2222 	ss->total_len = qp->r_len;
2223 	return 1;
2224 
2225 bad_lkey:
2226 	while (j) {
2227 		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2228 
2229 		rvt_put_mr(sge->mr);
2230 	}
2231 	ss->num_sge = 0;
2232 	memset(&wc, 0, sizeof(wc));
2233 	wc.wr_id = wqe->wr_id;
2234 	wc.status = IB_WC_LOC_PROT_ERR;
2235 	wc.opcode = IB_WC_RECV;
2236 	wc.qp = &qp->ibqp;
2237 	/* Signal solicited completion event. */
2238 	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2239 	return 0;
2240 }
2241 
2242 /**
2243  * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2244  * @qp: the QP
2245  * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2246  *
2247  * Return -1 if there is a local error, 0 if no RWQE is available,
2248  * otherwise return 1.
2249  *
2250  * Can be called from interrupt level.
2251  */
2252 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2253 {
2254 	unsigned long flags;
2255 	struct rvt_rq *rq;
2256 	struct rvt_rwq *wq;
2257 	struct rvt_srq *srq;
2258 	struct rvt_rwqe *wqe;
2259 	void (*handler)(struct ib_event *, void *);
2260 	u32 tail;
2261 	int ret;
2262 
2263 	if (qp->ibqp.srq) {
2264 		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2265 		handler = srq->ibsrq.event_handler;
2266 		rq = &srq->rq;
2267 	} else {
2268 		srq = NULL;
2269 		handler = NULL;
2270 		rq = &qp->r_rq;
2271 	}
2272 
2273 	spin_lock_irqsave(&rq->lock, flags);
2274 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2275 		ret = 0;
2276 		goto unlock;
2277 	}
2278 
2279 	wq = rq->wq;
2280 	tail = wq->tail;
2281 	/* Validate tail before using it since it is user writable. */
2282 	if (tail >= rq->size)
2283 		tail = 0;
2284 	if (unlikely(tail == wq->head)) {
2285 		ret = 0;
2286 		goto unlock;
2287 	}
2288 	/* Make sure entry is read after head index is read. */
2289 	smp_rmb();
2290 	wqe = rvt_get_rwqe_ptr(rq, tail);
2291 	/*
2292 	 * Even though we update the tail index in memory, the verbs
2293 	 * consumer is not supposed to post more entries until a
2294 	 * completion is generated.
2295 	 */
2296 	if (++tail >= rq->size)
2297 		tail = 0;
2298 	wq->tail = tail;
2299 	if (!wr_id_only && !init_sge(qp, wqe)) {
2300 		ret = -1;
2301 		goto unlock;
2302 	}
2303 	qp->r_wr_id = wqe->wr_id;
2304 
2305 	ret = 1;
2306 	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2307 	if (handler) {
2308 		u32 n;
2309 
2310 		/*
2311 		 * Validate head pointer value and compute
2312 		 * the number of remaining WQEs.
2313 		 */
2314 		n = wq->head;
2315 		if (n >= rq->size)
2316 			n = 0;
2317 		if (n < tail)
2318 			n += rq->size - tail;
2319 		else
2320 			n -= tail;
2321 		if (n < srq->limit) {
2322 			struct ib_event ev;
2323 
2324 			srq->limit = 0;
2325 			spin_unlock_irqrestore(&rq->lock, flags);
2326 			ev.device = qp->ibqp.device;
2327 			ev.element.srq = qp->ibqp.srq;
2328 			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2329 			handler(&ev, srq->ibsrq.srq_context);
2330 			goto bail;
2331 		}
2332 	}
2333 unlock:
2334 	spin_unlock_irqrestore(&rq->lock, flags);
2335 bail:
2336 	return ret;
2337 }
2338 EXPORT_SYMBOL(rvt_get_rwqe);
2339 
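/*
 * Typical caller pattern for the three return values (sketch; it mirrors
 * the use in rvt_ruc_loopback() below and in the driver receive paths):
 *
 *	ret = rvt_get_rwqe(qp, false);
 *	if (ret < 0)
 *		goto op_err;	(local error; caller errors out the QP)
 *	if (!ret)
 *		goto rnr_nak;	(no RWQE posted; caller generates an RNR NAK)
 *	(ret == 1: qp->r_wr_id and qp->r_sge now describe the receive buffer)
 */
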
2340 /**
2341  * rvt_comm_est - handle trap with QP established
2342  * @qp: the QP
2343  */
2344 void rvt_comm_est(struct rvt_qp *qp)
2345 {
2346 	qp->r_flags |= RVT_R_COMM_EST;
2347 	if (qp->ibqp.event_handler) {
2348 		struct ib_event ev;
2349 
2350 		ev.device = qp->ibqp.device;
2351 		ev.element.qp = &qp->ibqp;
2352 		ev.event = IB_EVENT_COMM_EST;
2353 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2354 	}
2355 }
2356 EXPORT_SYMBOL(rvt_comm_est);
2357 
2358 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2359 {
2360 	unsigned long flags;
2361 	int lastwqe;
2362 
2363 	spin_lock_irqsave(&qp->s_lock, flags);
2364 	lastwqe = rvt_error_qp(qp, err);
2365 	spin_unlock_irqrestore(&qp->s_lock, flags);
2366 
2367 	if (lastwqe) {
2368 		struct ib_event ev;
2369 
2370 		ev.device = qp->ibqp.device;
2371 		ev.element.qp = &qp->ibqp;
2372 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2373 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2374 	}
2375 }
2376 EXPORT_SYMBOL(rvt_rc_error);
2377 
2378 /*
2379  *  rvt_rnr_tbl_to_usec - return usec for an index into ib_rvt_rnr_table
2380  *  @index - the index
2381  *  return usec from an index into ib_rvt_rnr_table
2382  */
2383 unsigned long rvt_rnr_tbl_to_usec(u32 index)
2384 {
2385 	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2386 }
2387 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2388 
2389 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2390 {
2391 	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2392 				  IB_AETH_CREDIT_MASK];
2393 }
2394 
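/*
 * Worked example (assuming the usual 5-bit credit field at
 * IB_AETH_CREDIT_SHIFT): an RNR NAK whose AETH credit field carries
 * timer code 0x0C maps to ib_rvt_rnr_table[0x0C] = 640, i.e. a 0.64 ms
 * RNR timeout, so rvt_add_rnr_timer() below arms the hrtimer for
 * 640 * 1000 ns.
 */
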
2395 /*
2396  *  rvt_add_retry_timer - add/start a retry timer
2397  *  @qp - the QP
2398  *  add a retry timer on the QP
2399  */
2400 void rvt_add_retry_timer(struct rvt_qp *qp)
2401 {
2402 	struct ib_qp *ibqp = &qp->ibqp;
2403 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2404 
2405 	lockdep_assert_held(&qp->s_lock);
2406 	qp->s_flags |= RVT_S_TIMER;
2407 	/* 4.096 usec. * (1 << qp->timeout) */
2408 	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
2409 			     rdi->busy_jiffies;
2410 	add_timer(&qp->s_timer);
2411 }
2412 EXPORT_SYMBOL(rvt_add_retry_timer);
2413 
2414 /**
2415  * rvt_add_rnr_timer - add/start an rnr timer
2416  * @qp - the QP
2417  * @aeth - aeth of RNR timeout, simulated aeth for loopback
2418  * add an rnr timer on the QP
2419  */
2420 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2421 {
2422 	u32 to;
2423 
2424 	lockdep_assert_held(&qp->s_lock);
2425 	qp->s_flags |= RVT_S_WAIT_RNR;
2426 	to = rvt_aeth_to_usec(aeth);
2427 	trace_rvt_rnrnak_add(qp, to);
2428 	hrtimer_start(&qp->s_rnr_timer,
2429 		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2430 }
2431 EXPORT_SYMBOL(rvt_add_rnr_timer);
2432 
2433 /**
2434  * rvt_stop_rc_timers - stop all timers
2435  * @qp - the QP
2436  * stop any pending timers
2437  */
2438 void rvt_stop_rc_timers(struct rvt_qp *qp)
2439 {
2440 	lockdep_assert_held(&qp->s_lock);
2441 	/* Remove QP from all timers */
2442 	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2443 		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2444 		del_timer(&qp->s_timer);
2445 		hrtimer_try_to_cancel(&qp->s_rnr_timer);
2446 	}
2447 }
2448 EXPORT_SYMBOL(rvt_stop_rc_timers);
2449 
2450 /**
2451  * rvt_stop_rnr_timer - stop an rnr timer
2452  * @qp - the QP
2453  *
2454  * stop the rnr timer if it is
2455  * pending.
2456  */
2457 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2458 {
2459 	lockdep_assert_held(&qp->s_lock);
2460 	/* Remove QP from rnr timer */
2461 	if (qp->s_flags & RVT_S_WAIT_RNR) {
2462 		qp->s_flags &= ~RVT_S_WAIT_RNR;
2463 		trace_rvt_rnrnak_stop(qp, 0);
2464 	}
2465 }
2466 
2467 /**
2468  * rvt_del_timers_sync - wait for any timeout routines to exit
2469  * @qp - the QP
2470  */
2471 void rvt_del_timers_sync(struct rvt_qp *qp)
2472 {
2473 	del_timer_sync(&qp->s_timer);
2474 	hrtimer_cancel(&qp->s_rnr_timer);
2475 }
2476 EXPORT_SYMBOL(rvt_del_timers_sync);
2477 
2478 /*
2479  * This is called from s_timer for missing responses.
2480  */
2481 static void rvt_rc_timeout(struct timer_list *t)
2482 {
2483 	struct rvt_qp *qp = from_timer(qp, t, s_timer);
2484 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2485 	unsigned long flags;
2486 
2487 	spin_lock_irqsave(&qp->r_lock, flags);
2488 	spin_lock(&qp->s_lock);
2489 	if (qp->s_flags & RVT_S_TIMER) {
2490 		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2491 
2492 		qp->s_flags &= ~RVT_S_TIMER;
2493 		rvp->n_rc_timeouts++;
2494 		del_timer(&qp->s_timer);
2495 		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2496 		if (rdi->driver_f.notify_restart_rc)
2497 			rdi->driver_f.notify_restart_rc(qp,
2498 							qp->s_last_psn + 1,
2499 							1);
2500 		rdi->driver_f.schedule_send(qp);
2501 	}
2502 	spin_unlock(&qp->s_lock);
2503 	spin_unlock_irqrestore(&qp->r_lock, flags);
2504 }
2505 
2506 /*
2507  * This is called from s_timer for RNR timeouts.
2508  */
2509 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2510 {
2511 	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2512 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2513 	unsigned long flags;
2514 
2515 	spin_lock_irqsave(&qp->s_lock, flags);
2516 	rvt_stop_rnr_timer(qp);
2517 	trace_rvt_rnrnak_timeout(qp, 0);
2518 	rdi->driver_f.schedule_send(qp);
2519 	spin_unlock_irqrestore(&qp->s_lock, flags);
2520 	return HRTIMER_NORESTART;
2521 }
2522 EXPORT_SYMBOL(rvt_rc_rnr_retry);
2523 
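/*
 * For context (sketch of what the rdmavt QP init code does elsewhere):
 * the handler above is registered roughly as
 *
 *	hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	qp->s_rnr_timer.function = rvt_rc_rnr_retry;
 *
 * Returning HRTIMER_NORESTART makes the timer one-shot; it is re-armed
 * by rvt_add_rnr_timer() the next time an RNR NAK is handled.
 */
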
2524 /**
2525  * rvt_qp_iter_init - initialize a QP iterator
2526  * @rdi: rvt devinfo
2527  * @v: a 64 bit value passed to the callback @cb
2528  *
2529  * This returns an iterator suitable for iterating QPs
2530  * in the system.
2531  *
2532  * The @cb is a user defined callback and @v is a 64
2533  * bit value passed to and relevant for processing in the
2534  * @cb.  An example use case would be to alter QP processing
2535  * based on criteria not part of the rvt_qp.
2536  *
2537  * Use cases that require memory allocation to succeed
2538  * must preallocate appropriately.
2539  *
2540  * Return: a pointer to an rvt_qp_iter or NULL
2541  */
2542 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2543 				     u64 v,
2544 				     void (*cb)(struct rvt_qp *qp, u64 v))
2545 {
2546 	struct rvt_qp_iter *i;
2547 
2548 	i = kzalloc(sizeof(*i), GFP_KERNEL);
2549 	if (!i)
2550 		return NULL;
2551 
2552 	i->rdi = rdi;
2553 	/* number of special QPs (SMI/GSI) for device */
2554 	i->specials = rdi->ibdev.phys_port_cnt * 2;
2555 	i->v = v;
2556 	i->cb = cb;
2557 
2558 	return i;
2559 }
2560 EXPORT_SYMBOL(rvt_qp_iter_init);
2561 
2562 /**
2563  * rvt_qp_iter_next - return the next QP in iter
2564  * @iter - the iterator
2565  *
2566  * Fine grained QP iterator suitable for use
2567  * with debugfs seq_file mechanisms.
2568  *
2569  * Updates iter->qp with the current QP when the return
2570  * value is 0.
2571  *
2572  * Return: 0 - iter->qp is valid 1 - no more QPs
2573  * Return: 0 - iter->qp is valid, 1 - no more QPs
2574 int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2575 	__must_hold(RCU)
2576 {
2577 	int n = iter->n;
2578 	int ret = 1;
2579 	struct rvt_qp *pqp = iter->qp;
2580 	struct rvt_qp *qp;
2581 	struct rvt_dev_info *rdi = iter->rdi;
2582 
2583 	/*
2584 	 * The approach is to consider the special qps
2585 	 * as additional table entries before the
2586 	 * real hash table.  Since the qp code sets
2587 	 * the qp->next hash link to NULL, this works just fine.
2588 	 *
2589 	 * iter->specials is 2 * # ports
2590 	 *
2591 	 * n = 0..iter->specials is the special qp indices
2592 	 *
2593 	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2594 	 * the potential hash bucket entries
2595 	 *
2596 	 */
2597 	for (; n <  rdi->qp_dev->qp_table_size + iter->specials; n++) {
2598 		if (pqp) {
2599 			qp = rcu_dereference(pqp->next);
2600 		} else {
2601 			if (n < iter->specials) {
2602 				struct rvt_ibport *rvp;
2603 				int pidx;
2604 
2605 				pidx = n % rdi->ibdev.phys_port_cnt;
2606 				rvp = rdi->ports[pidx];
2607 				qp = rcu_dereference(rvp->qp[n & 1]);
2608 			} else {
2609 				qp = rcu_dereference(
2610 					rdi->qp_dev->qp_table[
2611 						(n - iter->specials)]);
2612 			}
2613 		}
2614 		pqp = qp;
2615 		if (qp) {
2616 			iter->qp = qp;
2617 			iter->n = n;
2618 			return 0;
2619 		}
2620 	}
2621 	return ret;
2622 }
2623 EXPORT_SYMBOL(rvt_qp_iter_next);
2624 
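/*
 * Illustrative fine-grained iteration sketch (not from this file), in the
 * seq_file style the iterator is intended for; the show_one_qp() helper
 * and its seq_file argument are hypothetical.
 *
 *	struct rvt_qp_iter *iter = rvt_qp_iter_init(rdi, 0, NULL);
 *
 *	if (!iter)
 *		return -ENOMEM;
 *	rcu_read_lock();
 *	while (!rvt_qp_iter_next(iter))
 *		show_one_qp(s, iter->qp);
 *	rcu_read_unlock();
 *	kfree(iter);
 */
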
2625 /**
2626  * rvt_qp_iter - iterate all QPs
2627  * @rdi - rvt devinfo
2628  * @v - a 64 bit value
2629  * @cb - a callback
2630  *
2631  * This provides a way for iterating all QPs.
2632  *
2633  * The @cb is a user defined callback and @v is a 64
2634  * bit value passed to and relevant for processing in the
2635  * cb.  An example use case would be to alter QP processing
2636  * based on criteria not part of the rvt_qp.
2637  *
2638  * The code has an internal iterator to simplify
2639  * non seq_file use cases.
2640  */
2641 void rvt_qp_iter(struct rvt_dev_info *rdi,
2642 		 u64 v,
2643 		 void (*cb)(struct rvt_qp *qp, u64 v))
2644 {
2645 	int ret;
2646 	struct rvt_qp_iter i = {
2647 		.rdi = rdi,
2648 		.specials = rdi->ibdev.phys_port_cnt * 2,
2649 		.v = v,
2650 		.cb = cb
2651 	};
2652 
2653 	rcu_read_lock();
2654 	do {
2655 		ret = rvt_qp_iter_next(&i);
2656 		if (!ret) {
2657 			rvt_get_qp(i.qp);
2658 			rcu_read_unlock();
2659 			i.cb(i.qp, i.v);
2660 			rcu_read_lock();
2661 			rvt_put_qp(i.qp);
2662 		}
2663 	} while (!ret);
2664 	rcu_read_unlock();
2665 }
2666 EXPORT_SYMBOL(rvt_qp_iter);
2667 
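/*
 * Illustrative callback sketch (not part of this file): count RC QPs by
 * passing a counter through the 64 bit cookie.  Names are hypothetical.
 *
 *	static void count_rc_qp(struct rvt_qp *qp, u64 v)
 *	{
 *		if (qp->ibqp.qp_type == IB_QPT_RC)
 *			atomic64_inc((atomic64_t *)(uintptr_t)v);
 *	}
 *
 *	atomic64_t n_rc = ATOMIC64_INIT(0);
 *
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&n_rc, count_rc_qp);
 */
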
2668 /*
2669  * This should be called with s_lock held.
2670  */
2671 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2672 		       enum ib_wc_status status)
2673 {
2674 	u32 old_last, last;
2675 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2676 
2677 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2678 		return;
2679 
2680 	last = qp->s_last;
2681 	old_last = last;
2682 	trace_rvt_qp_send_completion(qp, wqe, last);
2683 	if (++last >= qp->s_size)
2684 		last = 0;
2685 	trace_rvt_qp_send_completion(qp, wqe, last);
2686 	qp->s_last = last;
2687 	/* See post_send() */
2688 	barrier();
2689 	rvt_put_swqe(wqe);
2690 	if (qp->ibqp.qp_type == IB_QPT_UD ||
2691 	    qp->ibqp.qp_type == IB_QPT_SMI ||
2692 	    qp->ibqp.qp_type == IB_QPT_GSI)
2693 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
2694 
2695 	rvt_qp_swqe_complete(qp,
2696 			     wqe,
2697 			     rdi->wc_opcode[wqe->wr.opcode],
2698 			     status);
2699 
2700 	if (qp->s_acked == old_last)
2701 		qp->s_acked = last;
2702 	if (qp->s_cur == old_last)
2703 		qp->s_cur = last;
2704 	if (qp->s_tail == old_last)
2705 		qp->s_tail = last;
2706 	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2707 		qp->s_draining = 0;
2708 }
2709 EXPORT_SYMBOL(rvt_send_complete);
2710 
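/*
 * Illustrative use (sketch): a driver completes the oldest in-flight WQE
 * under s_lock, e.g. once the final ACK or response for it has arrived.
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 *	rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */
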
2711 /**
2712  * rvt_copy_sge - copy data to SGE memory
2713  * @qp: associated QP
2714  * @ss: the SGE state
2715  * @data: the data to copy
2716  * @length: the length of the data
2717  * @release: boolean to release MR
2718  * @copy_last: do a separate copy of the last 8 bytes
2719  */
2720 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2721 		  void *data, u32 length,
2722 		  bool release, bool copy_last)
2723 {
2724 	struct rvt_sge *sge = &ss->sge;
2725 	int i;
2726 	bool in_last = false;
2727 	bool cacheless_copy = false;
2728 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2729 	struct rvt_wss *wss = rdi->wss;
2730 	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2731 
2732 	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2733 		cacheless_copy = length >= PAGE_SIZE;
2734 	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2735 		if (length >= PAGE_SIZE) {
2736 			/*
2737 			 * NOTE: this *assumes*:
2738 			 * o The first vaddr is the dest.
2739 			 * o If multiple pages, then vaddr is sequential.
2740 			 */
2741 			wss_insert(wss, sge->vaddr);
2742 			if (length >= (2 * PAGE_SIZE))
2743 				wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2744 
2745 			cacheless_copy = wss_exceeds_threshold(wss);
2746 		} else {
2747 			wss_advance_clean_counter(wss);
2748 		}
2749 	}
2750 
2751 	if (copy_last) {
2752 		if (length > 8) {
2753 			length -= 8;
2754 		} else {
2755 			copy_last = false;
2756 			in_last = true;
2757 		}
2758 	}
2759 
2760 again:
2761 	while (length) {
2762 		u32 len = rvt_get_sge_length(sge, length);
2763 
2764 		WARN_ON_ONCE(len == 0);
2765 		if (unlikely(in_last)) {
2766 			/* enforce byte transfer ordering */
2767 			for (i = 0; i < len; i++)
2768 				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2769 		} else if (cacheless_copy) {
2770 			cacheless_memcpy(sge->vaddr, data, len);
2771 		} else {
2772 			memcpy(sge->vaddr, data, len);
2773 		}
2774 		rvt_update_sge(ss, len, release);
2775 		data += len;
2776 		length -= len;
2777 	}
2778 
2779 	if (copy_last) {
2780 		copy_last = false;
2781 		in_last = true;
2782 		length = 8;
2783 		goto again;
2784 	}
2785 }
2786 EXPORT_SYMBOL(rvt_copy_sge);
2787 
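/*
 * Illustrative use (sketch): receive paths copy each arriving payload
 * fragment into the receive SGE state, releasing MRs as SGEs complete.
 * "data" and "pmtu" stand for the packet payload and its length.
 *
 *	rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
 */
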
2788 /**
2789  * rvt_ruc_loopback - handle UC and RC loopback requests
2790  * @sqp: the sending QP
2791  *
2792  * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
2793  * Note that although we are single threaded due to the send engine, we still
2794  * have to protect against post_send().  We don't have to worry about
2795  * receive interrupts since this is a connected protocol and all packets
2796  * will pass through here.
2797  */
2798 void rvt_ruc_loopback(struct rvt_qp *sqp)
2799 {
2800 	struct rvt_ibport *rvp =  NULL;
2801 	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2802 	struct rvt_qp *qp;
2803 	struct rvt_swqe *wqe;
2804 	struct rvt_sge *sge;
2805 	unsigned long flags;
2806 	struct ib_wc wc;
2807 	u64 sdata;
2808 	atomic64_t *maddr;
2809 	enum ib_wc_status send_status;
2810 	bool release;
2811 	int ret;
2812 	bool copy_last = false;
2813 	int local_ops = 0;
2814 
2815 	rcu_read_lock();
2816 	rvp = rdi->ports[sqp->port_num - 1];
2817 
2818 	/*
2819 	 * Note that we check the responder QP state after
2820 	 * checking the requester's state.
2821 	 */
2822 
2823 	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2824 			    sqp->remote_qpn);
2825 
2826 	spin_lock_irqsave(&sqp->s_lock, flags);
2827 
2828 	/* Return if we are already busy processing a work request. */
2829 	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2830 	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2831 		goto unlock;
2832 
2833 	sqp->s_flags |= RVT_S_BUSY;
2834 
2835 again:
2836 	if (sqp->s_last == READ_ONCE(sqp->s_head))
2837 		goto clr_busy;
2838 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2839 
2840 	/* Return if it is not OK to start a new work request. */
2841 	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
2842 		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
2843 			goto clr_busy;
2844 		/* We are in the error state, flush the work request. */
2845 		send_status = IB_WC_WR_FLUSH_ERR;
2846 		goto flush_send;
2847 	}
2848 
2849 	/*
2850 	 * We can rely on the entry not changing without the s_lock
2851 	 * being held until we update s_last.
2852 	 * We increment s_cur to indicate s_last is in progress.
2853 	 */
2854 	if (sqp->s_last == sqp->s_cur) {
2855 		if (++sqp->s_cur >= sqp->s_size)
2856 			sqp->s_cur = 0;
2857 	}
2858 	spin_unlock_irqrestore(&sqp->s_lock, flags);
2859 
2860 	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
2861 	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
2862 		rvp->n_pkt_drops++;
2863 		/*
2864 		 * For RC, the requester would timeout and retry so
2865 		 * shortcut the timeouts and just signal too many retries.
2866 		 */
2867 		if (sqp->ibqp.qp_type == IB_QPT_RC)
2868 			send_status = IB_WC_RETRY_EXC_ERR;
2869 		else
2870 			send_status = IB_WC_SUCCESS;
2871 		goto serr;
2872 	}
2873 
2874 	memset(&wc, 0, sizeof(wc));
2875 	send_status = IB_WC_SUCCESS;
2876 
2877 	release = true;
2878 	sqp->s_sge.sge = wqe->sg_list[0];
2879 	sqp->s_sge.sg_list = wqe->sg_list + 1;
2880 	sqp->s_sge.num_sge = wqe->wr.num_sge;
2881 	sqp->s_len = wqe->length;
2882 	switch (wqe->wr.opcode) {
2883 	case IB_WR_REG_MR:
2884 		goto send_comp;
2885 
2886 	case IB_WR_LOCAL_INV:
2887 		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
2888 			if (rvt_invalidate_rkey(sqp,
2889 						wqe->wr.ex.invalidate_rkey))
2890 				send_status = IB_WC_LOC_PROT_ERR;
2891 			local_ops = 1;
2892 		}
2893 		goto send_comp;
2894 
2895 	case IB_WR_SEND_WITH_INV:
2896 		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
2897 			wc.wc_flags = IB_WC_WITH_INVALIDATE;
2898 			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
2899 		}
2900 		goto send;
2901 
2902 	case IB_WR_SEND_WITH_IMM:
2903 		wc.wc_flags = IB_WC_WITH_IMM;
2904 		wc.ex.imm_data = wqe->wr.ex.imm_data;
2905 		/* FALLTHROUGH */
2906 	case IB_WR_SEND:
2907 send:
2908 		ret = rvt_get_rwqe(qp, false);
2909 		if (ret < 0)
2910 			goto op_err;
2911 		if (!ret)
2912 			goto rnr_nak;
2913 		break;
2914 
2915 	case IB_WR_RDMA_WRITE_WITH_IMM:
2916 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2917 			goto inv_err;
2918 		wc.wc_flags = IB_WC_WITH_IMM;
2919 		wc.ex.imm_data = wqe->wr.ex.imm_data;
2920 		ret = rvt_get_rwqe(qp, true);
2921 		if (ret < 0)
2922 			goto op_err;
2923 		if (!ret)
2924 			goto rnr_nak;
2925 		/* skip copy_last set and qp_access_flags recheck */
2926 		goto do_write;
2927 	case IB_WR_RDMA_WRITE:
2928 		copy_last = rvt_is_user_qp(qp);
2929 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2930 			goto inv_err;
2931 do_write:
2932 		if (wqe->length == 0)
2933 			break;
2934 		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
2935 					  wqe->rdma_wr.remote_addr,
2936 					  wqe->rdma_wr.rkey,
2937 					  IB_ACCESS_REMOTE_WRITE)))
2938 			goto acc_err;
2939 		qp->r_sge.sg_list = NULL;
2940 		qp->r_sge.num_sge = 1;
2941 		qp->r_sge.total_len = wqe->length;
2942 		break;
2943 
2944 	case IB_WR_RDMA_READ:
2945 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2946 			goto inv_err;
2947 		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
2948 					  wqe->rdma_wr.remote_addr,
2949 					  wqe->rdma_wr.rkey,
2950 					  IB_ACCESS_REMOTE_READ)))
2951 			goto acc_err;
2952 		release = false;
2953 		sqp->s_sge.sg_list = NULL;
2954 		sqp->s_sge.num_sge = 1;
2955 		qp->r_sge.sge = wqe->sg_list[0];
2956 		qp->r_sge.sg_list = wqe->sg_list + 1;
2957 		qp->r_sge.num_sge = wqe->wr.num_sge;
2958 		qp->r_sge.total_len = wqe->length;
2959 		break;
2960 
2961 	case IB_WR_ATOMIC_CMP_AND_SWP:
2962 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2963 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2964 			goto inv_err;
2965 		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2966 					  wqe->atomic_wr.remote_addr,
2967 					  wqe->atomic_wr.rkey,
2968 					  IB_ACCESS_REMOTE_ATOMIC)))
2969 			goto acc_err;
2970 		/* Perform atomic OP and save result. */
2971 		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
2972 		sdata = wqe->atomic_wr.compare_add;
2973 		*(u64 *)sqp->s_sge.sge.vaddr =
2974 			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
2975 			(u64)atomic64_add_return(sdata, maddr) - sdata :
2976 			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
2977 				      sdata, wqe->atomic_wr.swap);
2978 		rvt_put_mr(qp->r_sge.sge.mr);
2979 		qp->r_sge.num_sge = 0;
2980 		goto send_comp;
2981 
2982 	default:
2983 		send_status = IB_WC_LOC_QP_OP_ERR;
2984 		goto serr;
2985 	}
2986 
2987 	sge = &sqp->s_sge.sge;
2988 	while (sqp->s_len) {
2989 		u32 len = sqp->s_len;
2990 
2991 		if (len > sge->length)
2992 			len = sge->length;
2993 		if (len > sge->sge_length)
2994 			len = sge->sge_length;
2995 		WARN_ON_ONCE(len == 0);
2996 		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
2997 			     len, release, copy_last);
2998 		sge->vaddr += len;
2999 		sge->length -= len;
3000 		sge->sge_length -= len;
3001 		if (sge->sge_length == 0) {
3002 			if (!release)
3003 				rvt_put_mr(sge->mr);
3004 			if (--sqp->s_sge.num_sge)
3005 				*sge = *sqp->s_sge.sg_list++;
3006 		} else if (sge->length == 0 && sge->mr->lkey) {
3007 			if (++sge->n >= RVT_SEGSZ) {
3008 				if (++sge->m >= sge->mr->mapsz)
3009 					break;
3010 				sge->n = 0;
3011 			}
3012 			sge->vaddr =
3013 				sge->mr->map[sge->m]->segs[sge->n].vaddr;
3014 			sge->length =
3015 				sge->mr->map[sge->m]->segs[sge->n].length;
3016 		}
3017 		sqp->s_len -= len;
3018 	}
3019 	if (release)
3020 		rvt_put_ss(&qp->r_sge);
3021 
3022 	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3023 		goto send_comp;
3024 
3025 	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3026 		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3027 	else
3028 		wc.opcode = IB_WC_RECV;
3029 	wc.wr_id = qp->r_wr_id;
3030 	wc.status = IB_WC_SUCCESS;
3031 	wc.byte_len = wqe->length;
3032 	wc.qp = &qp->ibqp;
3033 	wc.src_qp = qp->remote_qpn;
3034 	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3035 	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3036 	wc.port_num = 1;
3037 	/* Signal completion event if the solicited bit is set. */
3038 	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
3039 		     wqe->wr.send_flags & IB_SEND_SOLICITED);
3040 
3041 send_comp:
3042 	spin_lock_irqsave(&sqp->s_lock, flags);
3043 	rvp->n_loop_pkts++;
3044 flush_send:
3045 	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3046 	rvt_send_complete(sqp, wqe, send_status);
3047 	if (local_ops) {
3048 		atomic_dec(&sqp->local_ops_pending);
3049 		local_ops = 0;
3050 	}
3051 	goto again;
3052 
3053 rnr_nak:
3054 	/* Handle RNR NAK */
3055 	if (qp->ibqp.qp_type == IB_QPT_UC)
3056 		goto send_comp;
3057 	rvp->n_rnr_naks++;
3058 	/*
3059 	 * Note: we don't need the s_lock held since the BUSY flag
3060 	 * makes this single threaded.
3061 	 */
3062 	if (sqp->s_rnr_retry == 0) {
3063 		send_status = IB_WC_RNR_RETRY_EXC_ERR;
3064 		goto serr;
3065 	}
3066 	if (sqp->s_rnr_retry_cnt < 7)
3067 		sqp->s_rnr_retry--;
3068 	spin_lock_irqsave(&sqp->s_lock, flags);
3069 	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3070 		goto clr_busy;
3071 	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3072 				IB_AETH_CREDIT_SHIFT);
3073 	goto clr_busy;
3074 
3075 op_err:
3076 	send_status = IB_WC_REM_OP_ERR;
3077 	wc.status = IB_WC_LOC_QP_OP_ERR;
3078 	goto err;
3079 
3080 inv_err:
3081 	send_status = IB_WC_REM_INV_REQ_ERR;
3082 	wc.status = IB_WC_LOC_QP_OP_ERR;
3083 	goto err;
3084 
3085 acc_err:
3086 	send_status = IB_WC_REM_ACCESS_ERR;
3087 	wc.status = IB_WC_LOC_PROT_ERR;
3088 err:
3089 	/* responder goes to error state */
3090 	rvt_rc_error(qp, wc.status);
3091 
3092 serr:
3093 	spin_lock_irqsave(&sqp->s_lock, flags);
3094 	rvt_send_complete(sqp, wqe, send_status);
3095 	if (sqp->ibqp.qp_type == IB_QPT_RC) {
3096 		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3097 
3098 		sqp->s_flags &= ~RVT_S_BUSY;
3099 		spin_unlock_irqrestore(&sqp->s_lock, flags);
3100 		if (lastwqe) {
3101 			struct ib_event ev;
3102 
3103 			ev.device = sqp->ibqp.device;
3104 			ev.element.qp = &sqp->ibqp;
3105 			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3106 			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3107 		}
3108 		goto done;
3109 	}
3110 clr_busy:
3111 	sqp->s_flags &= ~RVT_S_BUSY;
3112 unlock:
3113 	spin_unlock_irqrestore(&sqp->s_lock, flags);
3114 done:
3115 	rcu_read_unlock();
3116 }
3117 EXPORT_SYMBOL(rvt_ruc_loopback);
3118