xref: /openbmc/linux/drivers/infiniband/sw/rxe/rxe_qp.c (revision 8fdf9062)
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	   Redistribution and use in source and binary forms, with or
 *	   without modification, are permitted provided that the following
 *	   conditions are met:
 *
 *		- Redistributions of source code must retain the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer.
 *
 *		- Redistributions in binary form must reproduce the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer in the documentation and/or other materials
 *		  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

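/* Check the requested queue capacities against the device limits.
 * Receive queue limits are skipped when the QP is attached to an
 * SRQ, since receive buffers are then posted to the SRQ instead.
 */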
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

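/* Validate the create_qp attributes: both CQs must be present, the
 * requested capacities must fit the device, and at most one SMI and
 * one GSI QP may exist per port.
 */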
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

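/* Allocate the responder-side resource array that tracks inbound
 * RDMA read and atomic operations, one slot per outstanding request.
 */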
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

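/* Release every responder resource and free the array itself; called
 * when the depth changes via modify_qp and at final QP cleanup.
 */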
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

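/* Drop whatever a single resource slot holds: the QP reference and
 * response skb for an atomic op, or the MR reference for a read.
 */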
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

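/* Reset every resource slot but keep the array allocated; used by
 * rxe_qp_reset(), which must preserve the configured depth.
 */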
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

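/* Initialize state common to all QP types: signalling mode, default
 * MTU, QP number assignment (0 for SMI, 1 for GSI) and the various
 * locks, lists and counters.
 */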
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= IB_MTU_256;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

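/* Set up the requester side of the QP: the kernel UDP socket, the
 * send queue and its optional user-space mmap, the request and
 * completion tasklets, and the RC retransmit timers.
 */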
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
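	/* for example, assuming RXE_ROCE_V2_SPORT is 0xc000:
	 * hash_32_generic(qpn, 14) yields a 14-bit value and the mask
	 * with 0x3fff keeps src_port within 0xc000..0xffff.
	 */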
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

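/* Set up the responder side of the QP: the receive queue and its
 * optional user-space mmap (skipped when an SRQ supplies the receive
 * buffers) and the responder tasklet.
 */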
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb; this routine checks all the parameters
 * before making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, draining work and
	 * packet queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		/* a depth of 0 disables read/atomic; guard against
		 * roundup_pow_of_two(), which is undefined for 0
		 */
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
		rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr);
	}

	if (mask & IB_QP_ALT_PATH) {
		rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
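			/* e.g. attr->timeout = 14 gives 4096ns << 14,
			 * about 67.1ms, before a retransmit fires
			 */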
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state typically spin on
		 * it; yield the processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
852