/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	   Redistribution and use in source and binary forms, with or
 *	   without modification, are permitted provided that the following
 *	   conditions are met:
 *
 *		- Redistributions of source code must retain the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer.
 *
 *		- Redistributions in binary form must reproduce the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer in the documentation and/or other materials
 *		  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

char *rxe_qp_state_name[] = {
	[QP_STATE_RESET]	= "RESET",
	[QP_STATE_INIT]		= "INIT",
	[QP_STATE_READY]	= "READY",
	[QP_STATE_DRAIN]	= "DRAIN",
	[QP_STATE_DRAINED]	= "DRAINED",
	[QP_STATE_ERROR]	= "ERROR",
};

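/* Check the requested queue sizes against the device limits advertised in
 * rxe->attr; warn and fail with -EINVAL if any capability is too large.
 * The receive limits are skipped when the QP gets its buffers from an SRQ.
 */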
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

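/* Allocate the responder-side resource array used to hold replies to
 * inbound RDMA READ and atomic requests, one slot per outstanding
 * request (sized by max_dest_rd_atomic in rxe_qp_from_attr()).
 */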
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

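/* Release whatever a single responder resource slot holds: the saved skb
 * and QP reference for an atomic reply, or the MR reference for a read
 * reply, then mark the slot as free.
 */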
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

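/* Like free_rd_atomic_resources() but keeps the array itself allocated;
 * used when the QP is reset rather than destroyed.
 */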
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

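/* Initialize state common to all QP types: the default path MTU, the QP
 * number (QP0/QP1 are reserved for the SMI/GSI QPs of port 1), the packet
 * lists and the locks.
 */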
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;	/* == IB_MTU_256 */
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

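/* Set up the requester side of the QP: a kernel UDP socket for transmit,
 * the send queue (mapped into user space via do_mmap_info() when udata is
 * present), the requester and completer tasklets, and the QP timers.
 */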
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

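	/* A send WQE must be able to hold either the full scatter/gather
	 * list or the full inline payload, whichever is larger.
	 */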
	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true,
			   context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);

	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
	setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */

	return 0;
}

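/* Set up the responder side of the QP: the receive queue (unless an SRQ
 * supplies the receive buffers) and the responder tasklet.
 */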
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
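/* The tasklets are disabled up front so that the requester, completer and
 * responder state machines are quiescent while the queues are flushed and
 * the counters cleared; they are re-enabled once the QP is back in its
 * initial state.
 */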
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, draining work and
	 * packet queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
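/* Moving to SQD kicks the requester and completer so that send work
 * already posted can complete; the requester moves the queue on to the
 * DRAINED state once it catches up.
 */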
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
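			/* e.g. attr->timeout == 14 gives
			 * 4096ns << 14 == ~67.1ms
			 */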
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	del_timer_sync(&qp->retrans_timer);
	del_timer_sync(&qp->rnr_nak_timer);

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}