/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

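/*
 * rxe_verbs.c provides the verbs entry points (struct ib_device_ops)
 * for the rxe soft-RoCE driver: device and port queries, PD/AH/SRQ/QP/
 * CQ/MR management, and the post_send/post_recv fast paths.
 */
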
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

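/*
 * Port attributes are cached in rxe->port.attr; only the speed/width
 * and phys_state fields are refreshed here from the underlying netdev.
 */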
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr is zeroed by the caller, so avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
}

static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
	if (err)
		return err;

	rxe_init_av(init_attr->ah_attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
}

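/*
 * Build one receive WQE at the producer slot of @rq from @ibwr.
 * Called with the queue's producer_lock held.
 */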
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

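/*
 * Sanity-check a send WR against the SQ limits: SGE count, atomic
 * length and alignment, and inline data size.
 */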
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

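/*
 * Copy the opcode-dependent fields of @ibwr into rxe's internal
 * rxe_send_wr representation.
 */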
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

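/*
 * Fill a complete send WQE: copy the WR, the AV for datagram QPs, and
 * either the inline payload or the SGE list, then set up the DMA state.
 */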
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

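/*
 * Validate one send WR and copy it into the next free SQ slot under
 * sq_lock.  The smp_wmb() below orders the WQE writes before the
 * producer index update; the consumer side (the requester task) is
 * expected to pair this with a matching read barrier.
 */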
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

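/*
 * Post a chain of send WRs from kernel context, then kick the
 * requester task.  On error, *bad_wr points at the WR that failed; if
 * the QP is in the error state the completer task is also run so that
 * flush completions are generated.
 */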
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	struct ib_send_wr *next;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		next = wr->next;

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

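/*
 * ib_device_ops::post_send entry point.  For user QPs the WQEs were
 * already written to the shared queue by userspace, so this only kicks
 * the requester task; for kernel QPs the WR chain is copied in here.
 * Illustrative (hypothetical) kernel caller:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = mr->lkey };
 *	struct ib_send_wr swr = {
 *		.opcode = IB_WR_SEND, .sg_list = &sge, .num_sge = 1,
 *		.send_flags = IB_SEND_SIGNALED };
 *	const struct ib_send_wr *bad_wr;
 *	err = ib_post_send(qp, &swr, &bad_wr);
 */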
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else {
		return rxe_post_send_kernel(qp, wr, bad_wr);
	}
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EINVAL;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}

static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

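/*
 * Arm the CQ for the requested notification type.  IB_CQ_NEXT_COMP is
 * sticky: a pending next-completion request is not downgraded to
 * solicited-only.
 */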
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);
	rxe_mem_init_dma(pd, access, mr);

	return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

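/*
 * ib_sg_to_pages() callback: record one page-sized buffer in the next
 * free slot of the MR's two-level map table.
 */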
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

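/*
 * Map a scatterlist into a fast-reg MR.  Returns the number of SG
 * entries mapped and caches the iova/length/page geometry from @ibmr.
 */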
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

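/* Verbs dispatch table registered with the RDMA core. */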
static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

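/*
 * rxe_register_device - register an rxe device with the RDMA core.
 * Fills in the ib_device fields (node GUID derived from the netdev MAC,
 * virtual DMA ops, supported uverbs commands), allocates the crc32
 * shash used for ICRC computation, and registers the device.
 */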
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dev->dev.dma_parms = &rxe->dma_parms;
	rxe->dma_parms = (struct device_dma_parameters)
		{ .max_segment_size = SZ_2G };
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	err = ib_register_device(dev, ibdev_name);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}