/* xref: /openbmc/linux/drivers/infiniband/sw/rxe/rxe_qp.c (revision e657c18a) */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	   Redistribution and use in source and binary forms, with or
 *	   without modification, are permitted provided that the following
 *	   conditions are met:
 *
 *		- Redistributions of source code must retain the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer.
 *
 *		- Redistributions in binary form must reproduce the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer in the documentation and/or other materials
 *		  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

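/* Sanity-check the requested queue sizes against the device limits
 * advertised in rxe->attr. The receive-queue limits are only checked
 * when the QP has no SRQ attached, since an SRQ supplies its own
 * receive queue.
 */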
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

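/* qp->resp.resources is a small array, sized by max_dest_rd_atomic, in
 * which the responder keeps state for in-flight RDMA READ and atomic
 * operations so that duplicate requests can be replayed.
 */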
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

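/* common initialization for all QP types: pick the QP number (fixed
 * 0/1 for SMI/GSI, otherwise the pool index), default the path MTU,
 * and set up the lists, locks and counters shared by the requester
 * and responder sides.
 */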
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

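/* initialize the requester (send) side of the QP: the kernel UDP
 * socket used to transmit packets, the send queue and its mmap info
 * for user space, the requester and completer tasks, and the RC
 * timers.
 */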
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
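	/* Example: assuming RXE_ROCE_V2_SPORT is the bottom of the dynamic
	 * range (0xc000), masking the 14-bit hash with 0x3fff gives an
	 * offset of 0..0x3fff, so src_port always lands in 0xc000-0xffff.
	 */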

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);
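	/* a send WQE carries either a scatter/gather list or inline data
	 * after its fixed-size header, so size the queue element for
	 * whichever of the two is larger.
	 */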

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

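/* initialize the responder (receive) side of the QP: the receive
 * queue and its mmap info (skipped when an SRQ is used), the
 * responder task, and the initial responder state.
 */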
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct rxe_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}
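	/* note that max_rd_atomic above and max_dest_rd_atomic below are
	 * both rounded up to the next power of two, e.g. a requested
	 * depth of 5 is stored as 8; the value is never reduced.
	 */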

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		rxe_init_av(&attr->ah_attr, &qp->pri_av);
	}

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}
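	/* worked example for the conversion above: attr->timeout = 14
	 * gives 4096ns << 14 = ~67.1ms, converted to jiffies and clamped
	 * to at least one jiffy so a non-zero timeout never becomes
	 * "no timeout".
	 */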

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}
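	/* PSNs are 24 bits on the wire (BTH_PSN_MASK), so rq_psn above and
	 * sq_psn below are truncated to 24 bits before being used.
	 */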

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it, so yield
		 * the processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

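	/* the actual teardown may sleep (e.g. sock_release()), so run it
	 * directly when already in process context and defer it to the
	 * cleanup work item otherwise.
	 */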
	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
851