/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

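/* Return the cached device attributes. No driver-specific input or
 * output is carried in uhw for this query.
 */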
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller; avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

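/* Look up the source GID named by the AH attribute and fill in the
 * address vector (av) carried in each outgoing packet.
 */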
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr, &sgid_attr, &sgid);
	dev_put(sgid_attr.ndev);
	return 0;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)

{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

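/* Copy one ib_recv_wr into the next free slot of a receive queue.
 * The caller must already hold the queue's producer lock.
 */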
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

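/* Check a send work request against the send queue limits (SGE count,
 * atomic length and alignment, inline data size) before it is queued.
 */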
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

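/* Translate the generic ib_send_wr into the rxe_send_wr layout stored
 * in the WQE, copying the opcode-specific fields.
 */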
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

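/* Build a complete send WQE in place: copy the wr, the AH for datagram
 * QPs, and either the inline data or the scatter/gather list.
 */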
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

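/* Validate one send work request and copy it into the next free send
 * queue slot while holding the SQ lock.
 */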
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

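/* Post a chain of send work requests for a kernel QP and kick the
 * requester task; on error, *bad_wr points at the failed request.
 */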
static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule the task for a GSI QP because ib_send_mad() holds an
	 * irq lock and the requester calls ip_local_out_sk(), which takes a
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

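/* Register a DMA memory region for kernel users; addresses are used
 * directly without a translation table (see rxe_mem_init_dma()).
 */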
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

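/* Callback passed to ib_sg_to_pages(): record the next page address in
 * the MR's map table, failing once num_buf pages have been stored.
 */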
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

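/* Fill in the ib_device fields and verbs callbacks, allocate the crc32
 * shash used for ICRC computation, register the device with the IB
 * core and create the sysfs "parent" attribute.
 */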
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->get_netdev = rxe_get_netdev;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}