xref: /openbmc/linux/drivers/infiniband/sw/rdmavt/qp.c (revision fe314195)
1 /*
2  * Copyright(c) 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include "qp.h"
55 #include "vt.h"
56 #include "trace.h"
57 
58 /*
59  * Note that it is OK to post send work requests in the SQE and ERR
60  * states; rvt_do_send() will process them and generate error
61  * completions as per IB 1.2 C10-96.
62  */
63 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
64 	[IB_QPS_RESET] = 0,
65 	[IB_QPS_INIT] = RVT_POST_RECV_OK,
66 	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
67 	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
68 	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
69 	    RVT_PROCESS_NEXT_SEND_OK,
70 	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
71 	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
72 	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
73 	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
74 	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
75 	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
76 };
77 EXPORT_SYMBOL(ib_rvt_state_ops);
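
/*
 * A consumer of this table (rvt itself, e.g. rvt_post_recv() and
 * rvt_post_send() below, or a driver) typically checks it before acting
 * on a QP, along the lines of:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
 *		goto drop;
 *
 * (illustrative sketch only; "drop" is a hypothetical label)
 */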
78 
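/*
 * get_map_page - allocate a zeroed page for a QPN bitmap and install it in
 * @map under the qpt->lock, freeing our copy if another thread won the race.
 */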
79 static void get_map_page(struct rvt_qpn_table *qpt,
80 			 struct rvt_qpn_map *map,
81 			 gfp_t gfp)
82 {
83 	unsigned long page = get_zeroed_page(gfp);
84 
85 	/*
86 	 * Free the page if someone raced with us installing it.
87 	 */
88 
89 	spin_lock(&qpt->lock);
90 	if (map->page)
91 		free_page(page);
92 	else
93 		map->page = (void *)page;
94 	spin_unlock(&qpt->lock);
95 }
96 
97 /**
98  * init_qpn_table - initialize the QP number table for a device
99  * @qpt: the QPN table
100  */
101 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
102 {
103 	u32 offset, i;
104 	struct rvt_qpn_map *map;
105 	int ret = 0;
106 
107 	if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
108 		return -EINVAL;
109 
110 	spin_lock_init(&qpt->lock);
111 
112 	qpt->last = rdi->dparms.qpn_start;
113 	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
114 
115 	/*
116 	 * Drivers may want some QPs beyond what we need for verbs; let them
117 	 * use our QPN table. No need for two. Go ahead and mark the bitmaps
118 	 * for those QPNs. The reserved range must be *after* the range which
119 	 * verbs will pick from.
120 	 */
121 
122 	/* Figure out number of bit maps needed before reserved range */
123 	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
124 
125 	/* This should always be zero */
126 	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
127 
128 	/* Starting with the first reserved bit map */
129 	map = &qpt->map[qpt->nmaps];
130 
131 	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
132 		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
133 	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
134 		if (!map->page) {
135 			get_map_page(qpt, map, GFP_KERNEL);
136 			if (!map->page) {
137 				ret = -ENOMEM;
138 				break;
139 			}
140 		}
141 		set_bit(offset, map->page);
142 		offset++;
143 		if (offset == RVT_BITS_PER_PAGE) {
144 			/* next page */
145 			qpt->nmaps++;
146 			map++;
147 			offset = 0;
148 		}
149 	}
150 	return ret;
151 }
152 
153 /**
154  * free_qpn_table - free the QP number table for a device
155  * @qpt: the QPN table
156  */
157 static void free_qpn_table(struct rvt_qpn_table *qpt)
158 {
159 	int i;
160 
161 	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
162 		free_page((unsigned long)qpt->map[i].page);
163 }
164 
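/**
 * rvt_driver_qp_init - initialize the QP table and QPN bitmap for a device
 * @rdi: rvt device info structure
 *
 * Return: 0 on success (or when the driver does its own QP init), -EINVAL if
 * required parameters or driver callbacks are missing, -ENOMEM on allocation
 * failure.
 */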
165 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
166 {
167 	int i;
168 	int ret = -ENOMEM;
169 
170 	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
171 		rvt_pr_info(rdi, "Driver is doing QP init.\n");
172 		return 0;
173 	}
174 
175 	if (!rdi->dparms.qp_table_size)
176 		return -EINVAL;
177 
178 	/*
179 	 * If the driver is not doing any QP allocation then make sure it is
180 	 * providing the necessary QP functions.
181 	 */
182 	if (!rdi->driver_f.free_all_qps ||
183 	    !rdi->driver_f.qp_priv_alloc ||
184 	    !rdi->driver_f.qp_priv_free ||
185 	    !rdi->driver_f.notify_qp_reset)
186 		return -EINVAL;
187 
188 	/* allocate parent object */
189 	rdi->qp_dev = kzalloc(sizeof(*rdi->qp_dev), GFP_KERNEL);
190 	if (!rdi->qp_dev)
191 		return -ENOMEM;
192 
193 	/* allocate hash table */
194 	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
195 	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
196 	rdi->qp_dev->qp_table =
197 		kmalloc_array(rdi->qp_dev->qp_table_size,
198 			      sizeof(*rdi->qp_dev->qp_table),
199 			      GFP_KERNEL);
200 	if (!rdi->qp_dev->qp_table)
201 		goto no_qp_table;
202 
203 	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
204 		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
205 
206 	spin_lock_init(&rdi->qp_dev->qpt_lock);
207 
208 	/* initialize qpn map */
209 	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
210 		goto fail_table;
211 
212 	spin_lock_init(&rdi->n_qps_lock);
213 
214 	return 0;
215 
216 fail_table:
217 	kfree(rdi->qp_dev->qp_table);
218 	free_qpn_table(&rdi->qp_dev->qpn_table);
219 
220 no_qp_table:
221 	kfree(rdi->qp_dev);
222 
223 	return ret;
224 }
225 
226 /**
227  * rvt_free_all_qps - check for QPs still in use
228  * @rdi: rvt device info structure
229  *
230  * There should not be any QPs still in use.
231  * Return: the number of QPs still allocated.
232  */
233 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
234 {
235 	unsigned long flags;
236 	struct rvt_qp *qp;
237 	unsigned n, qp_inuse = 0;
238 	spinlock_t *ql; /* work around too long line below */
239 
240 	if (rdi->driver_f.free_all_qps)
241 		qp_inuse = rdi->driver_f.free_all_qps(rdi);
242 
243 	qp_inuse += rvt_mcast_tree_empty(rdi);
244 
245 	if (!rdi->qp_dev)
246 		return qp_inuse;
247 
248 	ql = &rdi->qp_dev->qpt_lock;
249 	spin_lock_irqsave(ql, flags);
250 	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
251 		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
252 					       lockdep_is_held(ql));
253 		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
254 
255 		for (; qp; qp = rcu_dereference_protected(qp->next,
256 							  lockdep_is_held(ql)))
257 			qp_inuse++;
258 	}
259 	spin_unlock_irqrestore(ql, flags);
260 	synchronize_rcu();
261 	return qp_inuse;
262 }
263 
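/**
 * rvt_qp_exit - tear down rvt QP state for a device that is going away
 * @rdi: rvt device info structure
 *
 * Warns if any QPs are still in use, then frees the QP hash table and QPN
 * bitmap unless the driver did its own QP init.
 */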
264 void rvt_qp_exit(struct rvt_dev_info *rdi)
265 {
266 	u32 qps_inuse = rvt_free_all_qps(rdi);
267 
268 	if (qps_inuse)
269 		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
270 			   qps_inuse);
271 	if (!rdi->qp_dev)
272 		return;
273 
274 	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
275 		return; /* driver did the qp init so nothing else to do */
276 
277 	kfree(rdi->qp_dev->qp_table);
278 	free_qpn_table(&rdi->qp_dev->qpn_table);
279 	kfree(rdi->qp_dev);
280 }
281 
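/* Convert a bitmap page (@map) and bit offset (@off) back into a QP number. */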
282 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
283 			      struct rvt_qpn_map *map, unsigned off)
284 {
285 	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
286 }
287 
288 /*
289  * Allocate the next available QPN or
290  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
291  */
292 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
293 		     enum ib_qp_type type, u8 port, gfp_t gfp)
294 {
295 	u32 i, offset, max_scan, qpn;
296 	struct rvt_qpn_map *map;
297 	u32 ret;
298 
299 	if (rdi->driver_f.alloc_qpn)
300 		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port,
301 					       GFP_KERNEL);
302 
303 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
304 		unsigned n;
305 
306 		ret = type == IB_QPT_GSI;
307 		n = 1 << (ret + 2 * (port - 1));
308 		spin_lock(&qpt->lock);
309 		if (qpt->flags & n)
310 			ret = -EINVAL;
311 		else
312 			qpt->flags |= n;
313 		spin_unlock(&qpt->lock);
314 		goto bail;
315 	}
316 
317 	qpn = qpt->last + qpt->incr;
318 	if (qpn >= RVT_QPN_MAX)
319 		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
320 	/* offset carries bit 0 */
321 	offset = qpn & RVT_BITS_PER_PAGE_MASK;
322 	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
323 	max_scan = qpt->nmaps - !offset;
324 	for (i = 0;;) {
325 		if (unlikely(!map->page)) {
326 			get_map_page(qpt, map, gfp);
327 			if (unlikely(!map->page))
328 				break;
329 		}
330 		do {
331 			if (!test_and_set_bit(offset, map->page)) {
332 				qpt->last = qpn;
333 				ret = qpn;
334 				goto bail;
335 			}
336 			offset += qpt->incr;
337 			/*
338 			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
339 			 * That is OK; it gets reassigned below.
340 			 */
341 			qpn = mk_qpn(qpt, map, offset);
342 		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
343 		/*
344 		 * In order to keep the number of pages allocated to a
345 		 * minimum, we scan all existing pages before increasing
346 		 * the size of the bitmap table.
347 		 */
348 		if (++i > max_scan) {
349 			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
350 				break;
351 			map = &qpt->map[qpt->nmaps++];
352 			/* start at incr with current bit 0 */
353 			offset = qpt->incr | (offset & 1);
354 		} else if (map < &qpt->map[qpt->nmaps]) {
355 			++map;
356 			/* start at incr with current bit 0 */
357 			offset = qpt->incr | (offset & 1);
358 		} else {
359 			map = &qpt->map[0];
360 			/* wrap to first map page, invert bit 0 */
361 			offset = qpt->incr | ((offset & 1) ^ 1);
362 		}
363 		/* there can be no bits at shift and below */
364 		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
365 		qpn = mk_qpn(qpt, map, offset);
366 	}
367 
368 	ret = -ENOMEM;
369 
370 bail:
371 	return ret;
372 }
373 
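/* Return a QPN to the bitmap so that it can be allocated again. */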
374 static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
375 {
376 	struct rvt_qpn_map *map;
377 
378 	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
379 	if (map->page)
380 		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
381 }
382 
383 /**
384  * rvt_reset_qp - initialize the QP state to the reset state
385  * @qp: the QP to reset
386  * @type: the QP type
387  * The caller must hold both qp->r_lock and qp->s_lock.
388  */
389 void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
390 		  enum ib_qp_type type)
391 {
392 	if (qp->state != IB_QPS_RESET) {
393 		qp->state = IB_QPS_RESET;
394 
395 		/* Let drivers flush their waitlist */
396 		rdi->driver_f.flush_qp_waiters(qp);
397 		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
398 		spin_unlock(&qp->s_lock);
399 		spin_unlock_irq(&qp->r_lock);
400 
401 		/* Stop the send queue and the retry timer */
402 		rdi->driver_f.stop_send_queue(qp);
403 		del_timer_sync(&qp->s_timer);
404 
405 		/* Wait for things to stop */
406 		rdi->driver_f.quiesce_qp(qp);
407 
408 		/* take the qp out of the hash and wait for it to be unused */
409 		rvt_remove_qp(rdi, qp);
410 		wait_event(qp->wait, !atomic_read(&qp->refcount));
411 
412 		/* grab the lock b/c it was locked at call time */
413 		spin_lock_irq(&qp->r_lock);
414 		spin_lock(&qp->s_lock);
415 
416 		rvt_clear_mr_refs(qp, 1);
417 	}
418 
419 	/*
420 	 * Let the driver do any tear down it needs to for a qp
421 	 * that has been reset
422 	 */
423 	rdi->driver_f.notify_qp_reset(qp);
424 
425 	qp->remote_qpn = 0;
426 	qp->qkey = 0;
427 	qp->qp_access_flags = 0;
428 	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
429 	qp->s_hdrwords = 0;
430 	qp->s_wqe = NULL;
431 	qp->s_draining = 0;
432 	qp->s_next_psn = 0;
433 	qp->s_last_psn = 0;
434 	qp->s_sending_psn = 0;
435 	qp->s_sending_hpsn = 0;
436 	qp->s_psn = 0;
437 	qp->r_psn = 0;
438 	qp->r_msn = 0;
439 	if (type == IB_QPT_RC) {
440 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
441 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
442 	} else {
443 		qp->s_state = IB_OPCODE_UC_SEND_LAST;
444 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
445 	}
446 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
447 	qp->r_nak_state = 0;
448 	qp->r_aflags = 0;
449 	qp->r_flags = 0;
450 	qp->s_head = 0;
451 	qp->s_tail = 0;
452 	qp->s_cur = 0;
453 	qp->s_acked = 0;
454 	qp->s_last = 0;
455 	qp->s_ssn = 1;
456 	qp->s_lsn = 0;
457 	qp->s_mig_state = IB_MIG_MIGRATED;
458 	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
459 	qp->r_head_ack_queue = 0;
460 	qp->s_tail_ack_queue = 0;
461 	qp->s_num_rd_atomic = 0;
462 	if (qp->r_rq.wq) {
463 		qp->r_rq.wq->head = 0;
464 		qp->r_rq.wq->tail = 0;
465 	}
466 	qp->r_sge.num_sge = 0;
467 }
468 EXPORT_SYMBOL(rvt_reset_qp);
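
/*
 * Callers must already hold both QP locks, as rvt_destroy_qp() and
 * rvt_modify_qp() below do, e.g.:
 *
 *	spin_lock_irq(&qp->r_lock);
 *	spin_lock(&qp->s_lock);
 *	rvt_reset_qp(rdi, qp, ibqp->qp_type);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irq(&qp->r_lock);
 *
 * (sketch of the locking pattern used by the destroy path below)
 */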
469 
470 /**
471  * rvt_create_qp - create a queue pair for a device
472  * @ibpd: the protection domain whose device we create the queue pair for
473  * @init_attr: the attributes of the queue pair
474  * @udata: user data for libibverbs.so
475  *
476  * Queue pair creation is mostly an rvt issue. However, drivers have their own
477  * unique idea of what queue pair numbers mean. For instance there is a reserved
478  * range for PSM.
479  *
480  * Returns the queue pair on success, otherwise returns an errno.
481  *
482  * Called by the ib_create_qp() core verbs function.
483  */
484 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
485 			    struct ib_qp_init_attr *init_attr,
486 			    struct ib_udata *udata)
487 {
488 	struct rvt_qp *qp;
489 	int err;
490 	struct rvt_swqe *swq = NULL;
491 	size_t sz;
492 	size_t sg_list_sz;
493 	struct ib_qp *ret = ERR_PTR(-ENOMEM);
494 	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
495 	void *priv = NULL;
496 	gfp_t gfp;
497 
498 	if (!rdi)
499 		return ERR_PTR(-EINVAL);
500 
501 	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
502 	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
503 	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
504 		return ERR_PTR(-EINVAL);
505 
506 	/* GFP_NOIO is applicable to RC QPs only */
507 
508 	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
509 	    init_attr->qp_type != IB_QPT_RC)
510 		return ERR_PTR(-EINVAL);
511 
512 	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
513 						GFP_NOIO : GFP_KERNEL;
514 
515 	/* Check receive queue parameters if no SRQ is specified. */
516 	if (!init_attr->srq) {
517 		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
518 		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
519 			return ERR_PTR(-EINVAL);
520 
521 		if (init_attr->cap.max_send_sge +
522 		    init_attr->cap.max_send_wr +
523 		    init_attr->cap.max_recv_sge +
524 		    init_attr->cap.max_recv_wr == 0)
525 			return ERR_PTR(-EINVAL);
526 	}
527 
528 	switch (init_attr->qp_type) {
529 	case IB_QPT_SMI:
530 	case IB_QPT_GSI:
531 		if (init_attr->port_num == 0 ||
532 		    init_attr->port_num > ibpd->device->phys_port_cnt)
533 			return ERR_PTR(-EINVAL);
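		/* fall through: SMI/GSI QPs share the allocation path below */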
534 	case IB_QPT_UC:
535 	case IB_QPT_RC:
536 	case IB_QPT_UD:
537 		sz = sizeof(struct rvt_sge) *
538 			init_attr->cap.max_send_sge +
539 			sizeof(struct rvt_swqe);
540 		if (gfp == GFP_NOIO)
541 			swq = __vmalloc(
542 				(init_attr->cap.max_send_wr + 1) * sz,
543 				gfp, PAGE_KERNEL);
544 		else
545 			swq = vmalloc(
546 				(init_attr->cap.max_send_wr + 1) * sz);
547 		if (!swq)
548 			return ERR_PTR(-ENOMEM);
549 
550 		sz = sizeof(*qp);
551 		sg_list_sz = 0;
552 		if (init_attr->srq) {
553 			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
554 
555 			if (srq->rq.max_sge > 1)
556 				sg_list_sz = sizeof(*qp->r_sg_list) *
557 					(srq->rq.max_sge - 1);
558 		} else if (init_attr->cap.max_recv_sge > 1)
559 			sg_list_sz = sizeof(*qp->r_sg_list) *
560 				(init_attr->cap.max_recv_sge - 1);
561 		qp = kzalloc(sz + sg_list_sz, gfp);
562 		if (!qp)
563 			goto bail_swq;
564 
565 		RCU_INIT_POINTER(qp->next, NULL);
566 
567 		/*
568 		 * The driver needs to set up its private QP structure and do any
569 		 * initialization that is needed.
570 		 */
571 		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
572 		if (!priv)
573 			goto bail_qp;
574 		qp->priv = priv;
575 		qp->timeout_jiffies =
576 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
577 				1000UL);
578 		if (init_attr->srq) {
579 			sz = 0;
580 		} else {
581 			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
582 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
583 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
584 				sizeof(struct rvt_rwqe);
585 			if (udata)
586 				qp->r_rq.wq = vmalloc_user(
587 						sizeof(struct rvt_rwq) +
588 						qp->r_rq.size * sz);
589 			else if (gfp == GFP_NOIO)
590 				qp->r_rq.wq = __vmalloc(
591 						sizeof(struct rvt_rwq) +
592 						qp->r_rq.size * sz,
593 						gfp, PAGE_KERNEL);
594 			else
595 				qp->r_rq.wq = vmalloc(
596 						sizeof(struct rvt_rwq) +
597 						qp->r_rq.size * sz);
598 			if (!qp->r_rq.wq)
599 				goto bail_driver_priv;
600 		}
601 
602 		/*
603 		 * ib_create_qp() will initialize qp->ibqp
604 		 * except for qp->ibqp.qp_num.
605 		 */
606 		spin_lock_init(&qp->r_lock);
607 		spin_lock_init(&qp->s_lock);
608 		spin_lock_init(&qp->r_rq.lock);
609 		atomic_set(&qp->refcount, 0);
610 		init_waitqueue_head(&qp->wait);
611 		init_timer(&qp->s_timer);
612 		qp->s_timer.data = (unsigned long)qp;
613 		INIT_LIST_HEAD(&qp->rspwait);
614 		qp->state = IB_QPS_RESET;
615 		qp->s_wq = swq;
616 		qp->s_size = init_attr->cap.max_send_wr + 1;
617 		qp->s_max_sge = init_attr->cap.max_send_sge;
618 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
619 			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
620 
621 		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
622 				init_attr->qp_type,
623 				init_attr->port_num, gfp);
624 		if (err < 0) {
625 			ret = ERR_PTR(err);
626 			goto bail_rq_wq;
627 		}
628 		qp->ibqp.qp_num = err;
629 		qp->port_num = init_attr->port_num;
630 		rvt_reset_qp(rdi, qp, init_attr->qp_type);
631 		break;
632 
633 	default:
634 		/* Don't support raw QPs */
635 		return ERR_PTR(-EINVAL);
636 	}
637 
638 	init_attr->cap.max_inline_data = 0;
639 
640 	/*
641 	 * Return the address of the RWQ as the offset to mmap.
642 	 * See rvt_mmap() for details.
643 	 */
644 	if (udata && udata->outlen >= sizeof(__u64)) {
645 		if (!qp->r_rq.wq) {
646 			__u64 offset = 0;
647 
648 			err = ib_copy_to_udata(udata, &offset,
649 					       sizeof(offset));
650 			if (err) {
651 				ret = ERR_PTR(err);
652 				goto bail_qpn;
653 			}
654 		} else {
655 			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
656 
657 			qp->ip = rvt_create_mmap_info(rdi, s,
658 						      ibpd->uobject->context,
659 						      qp->r_rq.wq);
660 			if (!qp->ip) {
661 				ret = ERR_PTR(-ENOMEM);
662 				goto bail_qpn;
663 			}
664 
665 			err = ib_copy_to_udata(udata, &qp->ip->offset,
666 					       sizeof(qp->ip->offset));
667 			if (err) {
668 				ret = ERR_PTR(err);
669 				goto bail_ip;
670 			}
671 		}
672 	}
673 
674 	spin_lock(&rdi->n_qps_lock);
675 	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
676 		spin_unlock(&rdi->n_qps_lock);
677 		ret = ERR_PTR(-ENOMEM);
678 		goto bail_ip;
679 	}
680 
681 	rdi->n_qps_allocated++;
682 	spin_unlock(&rdi->n_qps_lock);
683 
684 	if (qp->ip) {
685 		spin_lock_irq(&rdi->pending_lock);
686 		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
687 		spin_unlock_irq(&rdi->pending_lock);
688 	}
689 
690 	ret = &qp->ibqp;
691 
692 	/*
693 	 * We have our QP and it's good. Now keep track of what types of
694 	 * opcodes can be processed on this QP, which we do by recording the
695 	 * 3 high-order bits of the opcode.
696 	 */
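	/*
	 * A hedged sketch of how allowed_ops is consumed: on receive, a
	 * driver can verify that an incoming opcode is legal for this QP
	 * with roughly
	 *
	 *	if ((opcode & RVT_OPCODE_QP_MASK) != qp->allowed_ops)
	 *		drop the packet;
	 *
	 * (the exact check is driver specific)
	 */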
697 	switch (init_attr->qp_type) {
698 	case IB_QPT_SMI:
699 	case IB_QPT_GSI:
700 	case IB_QPT_UD:
701 		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
702 		break;
703 	case IB_QPT_RC:
704 		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
705 		break;
706 	case IB_QPT_UC:
707 		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
708 		break;
709 	default:
710 		ret = ERR_PTR(-EINVAL);
711 		goto bail_ip;
712 	}
713 
714 	return ret;
715 
716 bail_ip:
717 	kref_put(&qp->ip->ref, rvt_release_mmap_info);
718 
719 bail_qpn:
720 	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
721 
722 bail_rq_wq:
723 	vfree(qp->r_rq.wq);
724 
725 bail_driver_priv:
726 	rdi->driver_f.qp_priv_free(rdi, qp);
727 
728 bail_qp:
729 	kfree(qp);
730 
731 bail_swq:
732 	vfree(swq);
733 
734 	return ret;
735 }
736 
737 void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
738 {
739 	unsigned n;
740 
741 	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
742 		rvt_put_ss(&qp->s_rdma_read_sge);
743 
744 	rvt_put_ss(&qp->r_sge);
745 
746 	if (clr_sends) {
747 		while (qp->s_last != qp->s_head) {
748 			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
749 			unsigned i;
750 
751 			for (i = 0; i < wqe->wr.num_sge; i++) {
752 				struct rvt_sge *sge = &wqe->sg_list[i];
753 
754 				rvt_put_mr(sge->mr);
755 			}
756 			if (qp->ibqp.qp_type == IB_QPT_UD ||
757 			    qp->ibqp.qp_type == IB_QPT_SMI ||
758 			    qp->ibqp.qp_type == IB_QPT_GSI)
759 				atomic_dec(&ibah_to_rvtah(
760 						wqe->ud_wr.ah)->refcount);
761 			if (++qp->s_last >= qp->s_size)
762 				qp->s_last = 0;
763 		}
764 		if (qp->s_rdma_mr) {
765 			rvt_put_mr(qp->s_rdma_mr);
766 			qp->s_rdma_mr = NULL;
767 		}
768 	}
769 
770 	if (qp->ibqp.qp_type != IB_QPT_RC)
771 		return;
772 
773 	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
774 		struct rvt_ack_entry *e = &qp->s_ack_queue[n];
775 
776 		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
777 		    e->rdma_sge.mr) {
778 			rvt_put_mr(e->rdma_sge.mr);
779 			e->rdma_sge.mr = NULL;
780 		}
781 	}
782 }
783 EXPORT_SYMBOL(rvt_clear_mr_refs);
784 
785 /**
786  * rvt_error_qp - put a QP into the error state
787  * @qp: the QP to put into the error state
788  * @err: the receive completion error to signal if a RWQE is active
789  *
790  * Flushes both send and receive work queues.
791  * Returns true if last WQE event should be generated.
792  * The QP r_lock and s_lock should be held and interrupts disabled.
793  * If we are already in error state, just return.
794  */
795 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
796 {
797 	struct ib_wc wc;
798 	int ret = 0;
799 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
800 
801 	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
802 		goto bail;
803 
804 	qp->state = IB_QPS_ERR;
805 
806 	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
807 		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
808 		del_timer(&qp->s_timer);
809 	}
810 
811 	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
812 		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
813 
814 	rdi->driver_f.notify_error_qp(qp);
815 
816 	/* Schedule the sending tasklet to drain the send work queue. */
817 	if (qp->s_last != qp->s_head)
818 		rdi->driver_f.schedule_send(qp);
819 
820 	rvt_clear_mr_refs(qp, 0);
821 
822 	memset(&wc, 0, sizeof(wc));
823 	wc.qp = &qp->ibqp;
824 	wc.opcode = IB_WC_RECV;
825 
826 	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
827 		wc.wr_id = qp->r_wr_id;
828 		wc.status = err;
829 		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
830 	}
831 	wc.status = IB_WC_WR_FLUSH_ERR;
832 
833 	if (qp->r_rq.wq) {
834 		struct rvt_rwq *wq;
835 		u32 head;
836 		u32 tail;
837 
838 		spin_lock(&qp->r_rq.lock);
839 
840 		/* sanity check pointers before trusting them */
841 		wq = qp->r_rq.wq;
842 		head = wq->head;
843 		if (head >= qp->r_rq.size)
844 			head = 0;
845 		tail = wq->tail;
846 		if (tail >= qp->r_rq.size)
847 			tail = 0;
848 		while (tail != head) {
849 			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
850 			if (++tail >= qp->r_rq.size)
851 				tail = 0;
852 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
853 		}
854 		wq->tail = tail;
855 
856 		spin_unlock(&qp->r_rq.lock);
857 	} else if (qp->ibqp.event_handler) {
858 		ret = 1;
859 	}
860 
861 bail:
862 	return ret;
863 }
864 EXPORT_SYMBOL(rvt_error_qp);
865 
866 /*
867  * Put the QP into the hash table.
868  * The hash table holds a reference to the QP.
869  */
870 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
871 {
872 	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
873 	unsigned long flags;
874 
875 	atomic_inc(&qp->refcount);
876 	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
877 
878 	if (qp->ibqp.qp_num <= 1) {
879 		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
880 	} else {
881 		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
882 
883 		qp->next = rdi->qp_dev->qp_table[n];
884 		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
885 		trace_rvt_qpinsert(qp, n);
886 	}
887 
888 	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
889 }
890 
891 /*
892  * Remove the QP from the table so it can't be found asynchronously by
893  * the receive routine.
894  */
895 void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
896 {
897 	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
898 	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
899 	unsigned long flags;
900 	int removed = 1;
901 
902 	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
903 
904 	if (rcu_dereference_protected(rvp->qp[0],
905 			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
906 		RCU_INIT_POINTER(rvp->qp[0], NULL);
907 	} else if (rcu_dereference_protected(rvp->qp[1],
908 			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
909 		RCU_INIT_POINTER(rvp->qp[1], NULL);
910 	} else {
911 		struct rvt_qp *q;
912 		struct rvt_qp __rcu **qpp;
913 
914 		removed = 0;
915 		qpp = &rdi->qp_dev->qp_table[n];
916 		for (; (q = rcu_dereference_protected(*qpp,
917 			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
918 			qpp = &q->next) {
919 			if (q == qp) {
920 				RCU_INIT_POINTER(*qpp,
921 				     rcu_dereference_protected(qp->next,
922 				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
923 				removed = 1;
924 				trace_rvt_qpremove(qp, n);
925 				break;
926 			}
927 		}
928 	}
929 
930 	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
931 	if (removed) {
932 		synchronize_rcu();
933 		if (atomic_dec_and_test(&qp->refcount))
934 			wake_up(&qp->wait);
935 	}
936 }
937 EXPORT_SYMBOL(rvt_remove_qp);
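
/*
 * The QP hash table is RCU protected: the writers above hold qpt_lock and
 * use rcu_assign_pointer()/synchronize_rcu(), so a lookup is expected to
 * walk the chain under rcu_read_lock(), roughly:
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
 *	     qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 *
 * (hedged sketch; the real lookup helpers live with the drivers/rvt core)
 */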
938 
939 /**
940  * rvt_modify_qp - modify the attributes of a queue pair
941  * @ibqp: the queue pair whose attributes we're modifying
942  * @attr: the new attributes
943  * @attr_mask: the mask of attributes to modify
944  * @udata: user data for libibverbs.so
945  *
946  * Returns 0 on success, otherwise returns an errno.
947  */
948 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
949 		  int attr_mask, struct ib_udata *udata)
950 {
951 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
952 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
953 	enum ib_qp_state cur_state, new_state;
954 	struct ib_event ev;
955 	int lastwqe = 0;
956 	int mig = 0;
957 	int pmtu = 0; /* for gcc warning only */
958 	enum rdma_link_layer link;
959 
960 	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
961 
962 	spin_lock_irq(&qp->r_lock);
963 	spin_lock(&qp->s_lock);
964 
965 	cur_state = attr_mask & IB_QP_CUR_STATE ?
966 		attr->cur_qp_state : qp->state;
967 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
968 
969 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
970 				attr_mask, link))
971 		goto inval;
972 
973 	if (rdi->driver_f.check_modify_qp &&
974 	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
975 		goto inval;
976 
977 	if (attr_mask & IB_QP_AV) {
978 		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
979 			goto inval;
980 		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
981 			goto inval;
982 	}
983 
984 	if (attr_mask & IB_QP_ALT_PATH) {
985 		if (attr->alt_ah_attr.dlid >=
986 		    be16_to_cpu(IB_MULTICAST_LID_BASE))
987 			goto inval;
988 		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
989 			goto inval;
990 		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
991 			goto inval;
992 	}
993 
994 	if (attr_mask & IB_QP_PKEY_INDEX)
995 		if (attr->pkey_index >= rvt_get_npkeys(rdi))
996 			goto inval;
997 
998 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
999 		if (attr->min_rnr_timer > 31)
1000 			goto inval;
1001 
1002 	if (attr_mask & IB_QP_PORT)
1003 		if (qp->ibqp.qp_type == IB_QPT_SMI ||
1004 		    qp->ibqp.qp_type == IB_QPT_GSI ||
1005 		    attr->port_num == 0 ||
1006 		    attr->port_num > ibqp->device->phys_port_cnt)
1007 			goto inval;
1008 
1009 	if (attr_mask & IB_QP_DEST_QPN)
1010 		if (attr->dest_qp_num > RVT_QPN_MASK)
1011 			goto inval;
1012 
1013 	if (attr_mask & IB_QP_RETRY_CNT)
1014 		if (attr->retry_cnt > 7)
1015 			goto inval;
1016 
1017 	if (attr_mask & IB_QP_RNR_RETRY)
1018 		if (attr->rnr_retry > 7)
1019 			goto inval;
1020 
1021 	/*
1022 	 * Don't allow invalid path_mtu values. It is OK to set it greater
1023 	 * than the active mtu (or even the max_cap, if we have tuned that
1024 	 * to a small mtu). We'll set qp->path_mtu to the lesser of the
1025 	 * requested attribute mtu and the active mtu, for packetizing
1026 	 * messages.
1027 	 * Note that the QP port has to be set in INIT and MTU in RTR.
1028 	 */
1029 	if (attr_mask & IB_QP_PATH_MTU) {
1030 		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1031 		if (pmtu < 0)
1032 			goto inval;
1033 	}
1034 
1035 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
1036 		if (attr->path_mig_state == IB_MIG_REARM) {
1037 			if (qp->s_mig_state == IB_MIG_ARMED)
1038 				goto inval;
1039 			if (new_state != IB_QPS_RTS)
1040 				goto inval;
1041 		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1042 			if (qp->s_mig_state == IB_MIG_REARM)
1043 				goto inval;
1044 			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1045 				goto inval;
1046 			if (qp->s_mig_state == IB_MIG_ARMED)
1047 				mig = 1;
1048 		} else {
1049 			goto inval;
1050 		}
1051 	}
1052 
1053 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1054 		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1055 			goto inval;
1056 
1057 	switch (new_state) {
1058 	case IB_QPS_RESET:
1059 		if (qp->state != IB_QPS_RESET)
1060 			rvt_reset_qp(rdi, qp, ibqp->qp_type);
1061 		break;
1062 
1063 	case IB_QPS_RTR:
1064 		/* Allow event to re-trigger if QP set to RTR more than once */
1065 		qp->r_flags &= ~RVT_R_COMM_EST;
1066 		qp->state = new_state;
1067 		break;
1068 
1069 	case IB_QPS_SQD:
1070 		qp->s_draining = qp->s_last != qp->s_cur;
1071 		qp->state = new_state;
1072 		break;
1073 
1074 	case IB_QPS_SQE:
1075 		if (qp->ibqp.qp_type == IB_QPT_RC)
1076 			goto inval;
1077 		qp->state = new_state;
1078 		break;
1079 
1080 	case IB_QPS_ERR:
1081 		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1082 		break;
1083 
1084 	default:
1085 		qp->state = new_state;
1086 		break;
1087 	}
1088 
1089 	if (attr_mask & IB_QP_PKEY_INDEX)
1090 		qp->s_pkey_index = attr->pkey_index;
1091 
1092 	if (attr_mask & IB_QP_PORT)
1093 		qp->port_num = attr->port_num;
1094 
1095 	if (attr_mask & IB_QP_DEST_QPN)
1096 		qp->remote_qpn = attr->dest_qp_num;
1097 
1098 	if (attr_mask & IB_QP_SQ_PSN) {
1099 		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1100 		qp->s_psn = qp->s_next_psn;
1101 		qp->s_sending_psn = qp->s_next_psn;
1102 		qp->s_last_psn = qp->s_next_psn - 1;
1103 		qp->s_sending_hpsn = qp->s_last_psn;
1104 	}
1105 
1106 	if (attr_mask & IB_QP_RQ_PSN)
1107 		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1108 
1109 	if (attr_mask & IB_QP_ACCESS_FLAGS)
1110 		qp->qp_access_flags = attr->qp_access_flags;
1111 
1112 	if (attr_mask & IB_QP_AV) {
1113 		qp->remote_ah_attr = attr->ah_attr;
1114 		qp->s_srate = attr->ah_attr.static_rate;
1115 		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1116 	}
1117 
1118 	if (attr_mask & IB_QP_ALT_PATH) {
1119 		qp->alt_ah_attr = attr->alt_ah_attr;
1120 		qp->s_alt_pkey_index = attr->alt_pkey_index;
1121 	}
1122 
1123 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
1124 		qp->s_mig_state = attr->path_mig_state;
1125 		if (mig) {
1126 			qp->remote_ah_attr = qp->alt_ah_attr;
1127 			qp->port_num = qp->alt_ah_attr.port_num;
1128 			qp->s_pkey_index = qp->s_alt_pkey_index;
1129 
1130 			/*
1131 			 * Ignored by drivers which do not support it. Not
1132 			 * really worth creating a callback into the driver
1133 			 * just to set a flag.
1134 			 */
1135 			qp->s_flags |= RVT_S_AHG_CLEAR;
1136 		}
1137 	}
1138 
1139 	if (attr_mask & IB_QP_PATH_MTU) {
1140 		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1141 		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1142 	}
1143 
1144 	if (attr_mask & IB_QP_RETRY_CNT) {
1145 		qp->s_retry_cnt = attr->retry_cnt;
1146 		qp->s_retry = attr->retry_cnt;
1147 	}
1148 
1149 	if (attr_mask & IB_QP_RNR_RETRY) {
1150 		qp->s_rnr_retry_cnt = attr->rnr_retry;
1151 		qp->s_rnr_retry = attr->rnr_retry;
1152 	}
1153 
1154 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
1155 		qp->r_min_rnr_timer = attr->min_rnr_timer;
1156 
1157 	if (attr_mask & IB_QP_TIMEOUT) {
1158 		qp->timeout = attr->timeout;
1159 		qp->timeout_jiffies =
1160 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1161 				1000UL);
1162 	}
1163 
1164 	if (attr_mask & IB_QP_QKEY)
1165 		qp->qkey = attr->qkey;
1166 
1167 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1168 		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1169 
1170 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1171 		qp->s_max_rd_atomic = attr->max_rd_atomic;
1172 
1173 	if (rdi->driver_f.modify_qp)
1174 		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1175 
1176 	spin_unlock(&qp->s_lock);
1177 	spin_unlock_irq(&qp->r_lock);
1178 
1179 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1180 		rvt_insert_qp(rdi, qp);
1181 
1182 	if (lastwqe) {
1183 		ev.device = qp->ibqp.device;
1184 		ev.element.qp = &qp->ibqp;
1185 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1186 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1187 	}
1188 	if (mig) {
1189 		ev.device = qp->ibqp.device;
1190 		ev.element.qp = &qp->ibqp;
1191 		ev.event = IB_EVENT_PATH_MIG;
1192 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1193 	}
1194 	return 0;
1195 
1196 inval:
1197 	spin_unlock(&qp->s_lock);
1198 	spin_unlock_irq(&qp->r_lock);
1199 	return -EINVAL;
1200 }
1201 
1202 /**
1203  * rvt_destroy_qp - destroy a queue pair
1204  * @ibqp: the queue pair to destroy
1205  *
1206  * Returns 0 on success.
1207  *
1208  * Note that this can be called while the QP is actively sending or
1209  * receiving!
1210  */
1211 int rvt_destroy_qp(struct ib_qp *ibqp)
1212 {
1213 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1214 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1215 
1216 	spin_lock_irq(&qp->r_lock);
1217 	spin_lock(&qp->s_lock);
1218 	rvt_reset_qp(rdi, qp, ibqp->qp_type);
1219 	spin_unlock(&qp->s_lock);
1220 	spin_unlock_irq(&qp->r_lock);
1221 
1222 	/* qpn is now available for use again */
1223 	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1224 
1225 	spin_lock(&rdi->n_qps_lock);
1226 	rdi->n_qps_allocated--;
1227 	spin_unlock(&rdi->n_qps_lock);
1228 
1229 	if (qp->ip)
1230 		kref_put(&qp->ip->ref, rvt_release_mmap_info);
1231 	else
1232 		vfree(qp->r_rq.wq);
1233 	vfree(qp->s_wq);
1234 	rdi->driver_f.qp_priv_free(rdi, qp);
1235 	kfree(qp);
1236 	return 0;
1237 }
1238 
1239 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1240 		 int attr_mask, struct ib_qp_init_attr *init_attr)
1241 {
1242 	return -EOPNOTSUPP;
1243 }
1244 
1245 /**
1246  * rvt_post_recv - post a receive on a QP
1247  * @ibqp: the QP to post the receive on
1248  * @wr: the list of work requests to post
1249  * @bad_wr: the first bad WR is put here
1250  *
1251  * This may be called from interrupt context.
1252  */
1253 int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1254 		  struct ib_recv_wr **bad_wr)
1255 {
1256 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1257 	struct rvt_rwq *wq = qp->r_rq.wq;
1258 	unsigned long flags;
1259 
1260 	/* Check that state is OK to post receive. */
1261 	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1262 		*bad_wr = wr;
1263 		return -EINVAL;
1264 	}
1265 
1266 	for (; wr; wr = wr->next) {
1267 		struct rvt_rwqe *wqe;
1268 		u32 next;
1269 		int i;
1270 
1271 		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1272 			*bad_wr = wr;
1273 			return -EINVAL;
1274 		}
1275 
1276 		spin_lock_irqsave(&qp->r_rq.lock, flags);
1277 		next = wq->head + 1;
1278 		if (next >= qp->r_rq.size)
1279 			next = 0;
1280 		if (next == wq->tail) {
1281 			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1282 			*bad_wr = wr;
1283 			return -ENOMEM;
1284 		}
1285 
1286 		wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1287 		wqe->wr_id = wr->wr_id;
1288 		wqe->num_sge = wr->num_sge;
1289 		for (i = 0; i < wr->num_sge; i++)
1290 			wqe->sg_list[i] = wr->sg_list[i];
1291 		/* Make sure queue entry is written before the head index. */
1292 		smp_wmb();
1293 		wq->head = next;
1294 		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1295 	}
1296 	return 0;
1297 }
1298 
1299 /**
1300  * rvt_post_one_wr - post one RC, UC, or UD send work request
1301  * @qp: the QP to post on
1302  * @wr: the work request to send
1303  */
1304 static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
1305 {
1306 	struct rvt_swqe *wqe;
1307 	u32 next;
1308 	int i;
1309 	int j;
1310 	int acc;
1311 	struct rvt_lkey_table *rkt;
1312 	struct rvt_pd *pd;
1313 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1314 
1315 	/* IB spec says that num_sge == 0 is OK. */
1316 	if (unlikely(wr->num_sge > qp->s_max_sge))
1317 		return -EINVAL;
1318 
1319 	/*
1320 	 * Don't allow RDMA reads or atomic operations on UC, and reject
1321 	 * undefined operations.
1322 	 * Make sure the buffer is large enough to hold the result for atomics.
1323 	 */
1324 	if (qp->ibqp.qp_type == IB_QPT_UC) {
1325 		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
1326 			return -EINVAL;
1327 	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
1328 		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
1329 		if (wr->opcode != IB_WR_SEND &&
1330 		    wr->opcode != IB_WR_SEND_WITH_IMM)
1331 			return -EINVAL;
1332 		/* Check UD destination address PD */
1333 		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1334 			return -EINVAL;
1335 	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
1336 		return -EINVAL;
1337 	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
1338 		   (wr->num_sge == 0 ||
1339 		    wr->sg_list[0].length < sizeof(u64) ||
1340 		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
1341 		return -EINVAL;
1342 	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
1343 		return -EINVAL;
1344 	}
1345 
1346 	next = qp->s_head + 1;
1347 	if (next >= qp->s_size)
1348 		next = 0;
1349 	if (next == qp->s_last)
1350 		return -ENOMEM;
1351 
1352 	rkt = &rdi->lkey_table;
1353 	pd = ibpd_to_rvtpd(qp->ibqp.pd);
1354 	wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1355 
1356 	if (qp->ibqp.qp_type != IB_QPT_UC &&
1357 	    qp->ibqp.qp_type != IB_QPT_RC)
1358 		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
1359 	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
1360 		 wr->opcode == IB_WR_RDMA_WRITE ||
1361 		 wr->opcode == IB_WR_RDMA_READ)
1362 		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
1363 	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1364 		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1365 		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
1366 	else
1367 		memcpy(&wqe->wr, wr, sizeof(wqe->wr));
1368 
1369 	wqe->length = 0;
1370 	j = 0;
1371 	if (wr->num_sge) {
1372 		acc = wr->opcode >= IB_WR_RDMA_READ ?
1373 			IB_ACCESS_LOCAL_WRITE : 0;
1374 		for (i = 0; i < wr->num_sge; i++) {
1375 			u32 length = wr->sg_list[i].length;
1376 			int ok;
1377 
1378 			if (length == 0)
1379 				continue;
1380 			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
1381 					 &wr->sg_list[i], acc);
1382 			if (!ok)
1383 				goto bail_inval_free;
1384 			wqe->length += length;
1385 			j++;
1386 		}
1387 		wqe->wr.num_sge = j;
1388 	}
1389 	if (qp->ibqp.qp_type == IB_QPT_UC ||
1390 	    qp->ibqp.qp_type == IB_QPT_RC) {
1391 		if (wqe->length > 0x80000000U)
1392 			goto bail_inval_free;
1393 	} else {
1394 		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
1395 	}
1396 	wqe->ssn = qp->s_ssn++;
1397 	qp->s_head = next;
1398 
1399 	return 0;
1400 
1401 bail_inval_free:
1402 	/* release mr holds */
1403 	while (j) {
1404 		struct rvt_sge *sge = &wqe->sg_list[--j];
1405 
1406 		rvt_put_mr(sge->mr);
1407 	}
1408 	return -EINVAL;
1409 }
1410 
1411 /**
1412  * rvt_post_send - post a send on a QP
1413  * @ibqp: the QP to post the send on
1414  * @wr: the list of work requests to post
1415  * @bad_wr: the first bad WR is put here
1416  *
1417  * This may be called from interrupt context.
1418  */
1419 int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1420 		  struct ib_send_wr **bad_wr)
1421 {
1422 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1423 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1424 	unsigned long flags = 0;
1425 	int call_send;
1426 	unsigned nreq = 0;
1427 	int err = 0;
1428 
1429 	spin_lock_irqsave(&qp->s_lock, flags);
1430 
1431 	/*
1432 	 * Ensure QP state is such that we can send. If not, bail out early;
1433 	 * there is no need to do this every time we post a send.
1434 	 */
1435 	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
1436 		spin_unlock_irqrestore(&qp->s_lock, flags);
1437 		return -EINVAL;
1438 	}
1439 
1440 	/*
1441 	 * If the send queue is empty and we only have a single WR, just go
1442 	 * ahead and kick the send engine into gear. Otherwise we will always
1443 	 * just schedule the send to happen later.
1444 	 */
1445 	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
1446 
1447 	for (; wr; wr = wr->next) {
1448 		err = rvt_post_one_wr(qp, wr);
1449 		if (unlikely(err)) {
1450 			*bad_wr = wr;
1451 			goto bail;
1452 		}
1453 		nreq++;
1454 	}
1455 bail:
1456 	if (nreq && !call_send)
1457 		rdi->driver_f.schedule_send(qp);
1458 	spin_unlock_irqrestore(&qp->s_lock, flags);
1459 	if (nreq && call_send)
1460 		rdi->driver_f.do_send(qp);
1461 	return err;
1462 }
1463 
1464 /**
1465  * rvt_post_srq_recv - post a receive on a shared receive queue
1466  * @ibsrq: the SRQ to post the receive on
1467  * @wr: the list of work requests to post
1468  * @bad_wr: A pointer to the first WR to cause a problem is put here
1469  *
1470  * This may be called from interrupt context.
1471  */
1472 int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1473 		      struct ib_recv_wr **bad_wr)
1474 {
1475 	return -EOPNOTSUPP;
1476 }
1477 
1478 void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
1479 {
1480 	struct rvt_qpn_map *map;
1481 
1482 	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
1483 	if (map->page)
1484 		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
1485 }
1486 EXPORT_SYMBOL(rvt_free_qpn);
1487 
1488 void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
1489 {
1490 	spin_lock(&rdi->n_qps_lock);
1491 	rdi->n_qps_allocated--;
1492 	spin_unlock(&rdi->n_qps_lock);
1493 }
1494 EXPORT_SYMBOL(rvt_dec_qp_cnt);
1495