/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

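/* Return the device attributes cached at rxe creation time. The driver
 * consumes no command udata here, so any inline or response data in the
 * request is rejected.
 */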
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

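/* Report port attributes. Link speed and width are taken from the
 * underlying Ethernet device; phys_state is derived from the logical
 * port state and the netdev's IFF_UP flag.
 */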
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n", index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
}

static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
			 u32 flags, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
}

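/* Copy one receive work request into a receive queue. Callers must
 * hold the queue's producer lock; the smp_wmb() below orders the WQE
 * writes before the producer index update seen by the consumer.
 */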
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

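/* Create a shared receive queue. For a userspace SRQ the response
 * buffer must be large enough to carry the queue information back to
 * the library.
 */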
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

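/* Create a queue pair. No inline command data is accepted; the mere
 * presence of udata marks the QP as a user QP so that later protocol
 * processing runs in the appropriate context.
 */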
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

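/* Check a send work request against the send queue limits: SGE count,
 * 8-byte size and alignment for atomic operations, and the inline data
 * limit for IB_SEND_INLINE requests.
 */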
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

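/* Translate an ib_send_wr into the driver's rxe_send_wr layout,
 * copying the opcode-specific fields for the UD, RDMA, atomic,
 * invalidate and memory-registration variants.
 */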
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

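/* Build a complete send WQE in the queue slot provided by the caller:
 * the translated WR, the AV for datagram QPs, and either inline data
 * copied in place or the caller's SGE list.
 */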
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

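/* Validate and post a single send WR. The send queue lock is held
 * across the WQE build; the smp_wmb() orders the WQE writes before
 * the producer index update.
 */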
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

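/* Post a chain of send WRs for a kernel caller, stopping at the first
 * failure and reporting it through bad_wr. The requester task is
 * kicked afterwards, as is the completer if the QP is in error.
 */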
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	}

	return rxe_post_send_kernel(qp, wr, bad_wr);
}

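/* Post a chain of receive WRs to a QP's own receive queue. QPs that
 * were created with an SRQ must post receives through the SRQ instead.
 */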
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EINVAL;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}

static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

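/* Callback for ib_sg_to_pages(): record one page address in the next
 * free slot of the MR's two-level buffer map.
 */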
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

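/* Load a scatterlist into a fast-registration MR, walking it page by
 * page via rxe_set_page() and caching the iova, length and page
 * geometry in the MR.
 */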
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

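/* Attach a QP to a multicast group, creating the group on first use.
 * The reference taken by rxe_mcast_get_grp() is dropped once the QP
 * has been added to the group.
 */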
static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

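/* Fill in the ib_device fields, attach the netdev, allocate the crc32
 * transform used for ICRC calculation, and register the device with
 * the RDMA core under the requested name.
 */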
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dev->dev.dma_parms = &rxe->dma_parms;
	rxe->dma_parms = (struct device_dma_parameters)
		{ .max_segment_size = SZ_2G };
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	err = ib_register_device(dev, ibdev_name);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}