/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc = -EINVAL;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto out;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller, so there is no need to zero it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

out:
	return rc;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

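/* Resolve the source GID for the AH attribute and build the rxe address
 * vector used on the wire; the netdev reference taken by the GID cache
 * lookup is dropped before returning.
 */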
static int rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, rdma_ah_get_port_num(attr),
				rdma_ah_read_grh(attr)->sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, rdma_ah_get_port_num(attr), av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

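/* Copy one receive work request into the next free slot of the receive
 * queue. The caller must hold the queue's producer lock.
 */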
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

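/* Sanity check a send work request against the limits of the send queue
 * before it is copied into a send WQE.
 */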
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

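/* Build a send WQE from a send work request. Inline data is copied into the
 * WQE itself, otherwise the sg list is copied so the requester can walk it
 * when generating packets.
 */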
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova		= (mask & WR_ATOMIC_MASK) ?
					atomic_wr(ibwr)->remote_addr :
					rdma_wr(ibwr)->remote_addr;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

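/* Validate one send work request, copy it into the next free send WQE and
 * advance the producer index, all under the send queue lock.
 */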
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule the task for a GSI QP because ib_send_mad() holds an
	 * irq lock while the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh().
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	}

	return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

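/* Callback for ib_sg_to_pages(); records one page address and size in the
 * next free slot of the memory region's map.
 */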
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

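/* Load the pages of a scatterlist into a fast-registration MR and record the
 * iova, length and page geometry for later address translation.
 */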
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

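/* Fill in the ib_device structure, set up the verbs entry points and the CRC
 * transform, then register the device and its sysfs attributes with the RDMA
 * core.
 */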
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(dev->dev.parent));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	dev->get_hw_stats = rxe_ib_get_hw_stats;
	dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;

	rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(rxe->tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(rxe->tfm));
		return PTR_ERR(rxe->tfm);
	}

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("%s failed with error %d for attr number %d\n",
				__func__, err, i);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}