/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

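/*
 * Device attributes are computed once when the device is registered
 * and cached in rxe->attr, so a query is just a structure copy.  Any
 * unexpected vendor-specific udata is rejected.
 */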
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

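/*
 * Port attributes are cached in rxe->port.attr; only the link speed,
 * width and physical state are derived live from the underlying
 * Ethernet device.
 */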
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
	else
		attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

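/* rxe is a RoCEv2 device: RoCE with UDP encapsulation */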
static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   u32 flags,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(attr, &ah->av);
	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

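/*
 * Copy one receive work request into the next free slot of a receive
 * queue.  Callers must hold the queue's producer lock.
 */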
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
	struct rxe_srq *srq;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

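/*
 * Create a QP.  Userspace passes no input data for this command; the
 * response buffer is assumed to carry the queue mapping information
 * back to the provider library (see rxe_create_qp_resp).
 */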
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

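/*
 * Sanity check a send work request against the send queue limits:
 * SGE count, inline data size and the 8-byte length and alignment
 * rules for atomic operations.
 */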
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

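/*
 * Translate the opcode-specific fields of an ib_send_wr into the
 * driver's internal rxe_send_wr representation.
 */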
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

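/*
 * Build a complete send WQE in place in the send queue.  Inline data
 * is copied immediately, so the caller's buffers may be reused as soon
 * as this returns.
 */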
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

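/*
 * Validate and enqueue a single send work request.  The write memory
 * barrier below pairs with the consumer side so that the WQE contents
 * are visible before the producer index moves.
 */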
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

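/*
 * Post a chain of send WRs from a kernel caller, then kick the
 * requester task.  If the QP is in the error state the completer task
 * is run as well so that the posted WQEs get flushed.
 */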
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	}

	return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

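/*
 * Reap up to num_entries completions from the CQ ring into the
 * caller's wc array and return the number actually copied.
 */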
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

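/*
 * Arm the CQ.  A pending IB_CQ_NEXT_COMP request is never downgraded
 * to solicited-only, and with IB_CQ_REPORT_MISSED_EVENTS a return
 * value of 1 signals completions already sitting in the queue.
 */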
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

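/*
 * Callback passed to ib_sg_to_pages(); records one page address in
 * the MR's two-level map of physical buffers.
 */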
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

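/*
 * Load the MR's page map from a scatterlist, one page at a time via
 * rxe_set_page(), for fast memory registration.
 */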
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

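/*
 * Fill in the ib_device, wire up rxe_dev_ops and register with the
 * RDMA core.  The crc32 shash allocated here is used by the driver
 * to compute the ICRC carried in every RoCE packet.
 */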
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, ibdev_name);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}