xref: /openbmc/linux/drivers/infiniband/sw/rxe/rxe_qp.c (revision ba61bb17)
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	   Redistribution and use in source and binary forms, with or
 *	   without modification, are permitted provided that the following
 *	   conditions are met:
 *
 *		- Redistributions of source code must retain the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer.
 *
 *		- Redistributions in binary form must reproduce the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer in the documentation and/or other materials
 *		  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

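/* check the requested qp capabilities (work requests, sges and inline
 * data) against the limits advertised by the device
 */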
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

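/* called by the create qp verb; validate the qp init attributes before
 * any resources are allocated
 */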
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

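/* the responder keeps an array of resources that hold the state of
 * inbound RDMA read and atomic operations, sized by max_dest_rd_atomic
 */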
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

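/* initialize the parts of the qp that are common to the requester and
 * responder: qp number, port, default mtu, packet queues and locks
 */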
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

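/* initialize the requester side of the qp: the kernel UDP socket used to
 * send packets, the send queue and its mmap info for user space, the
 * requester and completer tasks and, for RC, the rnr nak and retransmit
 * timers
 */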
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

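/* initialize the responder side of the qp: the receive queue and its mmap
 * info (unless the qp uses an srq) and the responder task
 */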
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		/* __roundup_pow_of_two() is undefined for zero */
		int max_rd_atomic = attr->max_rd_atomic ?
			__roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			__roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
				  &sgid, &sgid_attr);
		rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
		rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		u8 sgid_index =
			rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

		ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
				  &sgid, &sgid_attr);

		rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
860