/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

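/* Report the device attributes cached in rxe->attr at device setup time.
 * No driver-specific input or output is accepted through udata.
 */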
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

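/* Report port attributes. Speed and width are queried from the underlying
 * Ethernet device; phys_state is derived from the cached port state and
 * the netdev IFF_UP flag.
 */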
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

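/* User contexts, PDs and AHs are allocated by the RDMA core together with
 * their ib_* containers (see the INIT_RDMA_OBJ_SIZE entries in rxe_dev_ops);
 * the handlers below only add the driver-private part to, or drop it from,
 * the matching rxe object pool.
 */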
static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
}

static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
	if (err)
		return err;

	rxe_init_av(init_attr->ah_attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
}

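/* Build one receive WQE at the producer end of the receive queue from ibwr.
 * The smp_wmb() orders the WQE writes before the producer index update, so
 * the entry is fully written before the consumer can see it.
 */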
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

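/* Create a shared receive queue. For user consumers the response is
 * written to udata->outbuf (a struct rxe_create_srq_resp) when the queue
 * is set up by rxe_srq_from_init().
 */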
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

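/* Create a queue pair. QPs are allocated from the rxe pool rather than by
 * the RDMA core, so the error paths must undo the index and reference
 * taken below.
 */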
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

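/* Check a send work request against the send queue limits: number of SGEs,
 * inline data size, and the minimum length and 8-byte alignment required
 * for atomic operations.
 */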
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

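/* Translate the opcode-specific fields of an ib_send_wr into the driver's
 * rxe_send_wr layout; UD-type QPs carry the remote QPN and Q_Key instead
 * of RDMA or atomic parameters.
 */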
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
					sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

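/* Validate and enqueue a single send work request under the send queue
 * lock; the smp_wmb() orders the WQE writes before the producer index is
 * advanced.
 */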
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

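/* Post send work requests. For QPs created by userspace the work queue is
 * shared with the user library, which writes the WQEs itself, so this
 * handler only kicks the requester task; kernel QPs build their WQEs here
 * via rxe_post_send_kernel().
 */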
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	}

	return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EINVAL;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}

static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

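/* Memory region verbs. MRs are taken from the rxe pool, given an index for
 * key lookup and a reference on their PD; the error paths below release
 * these in reverse order.
 */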
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

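/* Per-page callback passed to ib_sg_to_pages() by rxe_map_mr_sg(): record
 * one page-sized buffer in the MR's map table, failing once all num_buf
 * slots are used.
 */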
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

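/* Fill in the ib_device fields rxe supports (node GUID derived from the
 * netdev MAC, virtual DMA ops, advertised uverbs commands), bind the device
 * to its netdev, allocate the crc32 transform used for ICRC generation, and
 * register the device with the RDMA core under ibdev_name.
 */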
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dev->dev.dma_parms = &rxe->dma_parms;
	rxe->dma_parms = (struct device_dma_parameters)
		{ .max_segment_size = SZ_2G };
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	err = ib_register_device(dev, ibdev_name);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}