/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

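/*
 * This file implements the InfiniBand verbs entry points for the rxe
 * (soft RoCE) driver: device/port/GID/P_Key queries and the PD, AH, SRQ,
 * QP, CQ, MR and multicast verbs that are wired into the ib_device in
 * rxe_register_device() at the bottom of the file.
 */
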
#include <linux/dma-mapping.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

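/*
 * Map an ethtool link speed (in Mb/s) to an approximate IB speed/width
 * pair for the port attributes; anything above 40G is reported as 4X EDR.
 */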
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	/* *attr is zeroed by the caller, so avoid zeroing it again here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;

err1:
	return -EINVAL;
}

static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dev.parent, "invalid port_num = %d\n",
			 port_num);
		goto err1;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		goto err1;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;

err1:
	return -EINVAL;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

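/*
 * Resolve the source GID for attr->grh.sgid_index and fill the rxe
 * address vector from the AH attributes.  The reference that
 * ib_get_cached_gid() takes on the GID entry's netdev is dropped
 * before returning.
 */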
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		return err;

	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

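/*
 * Copy one ib_recv_wr into the next free receive WQE.  Callers hold the
 * receive queue's producer_lock; the smp_wmb() below pairs with the
 * consumer side so the WQE contents are visible before the producer
 * index moves.
 */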
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length		= length;
	recv_wqe->dma.resid		= length;
	recv_wqe->dma.num_sge		= num_sge;
	recv_wqe->dma.cur_sge		= 0;
	recv_wqe->dma.sge_offset	= 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

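/*
 * The create verbs below share a common pattern: allocate the object from
 * its rxe pool, take an index and/or a reference on the parent PD, then
 * undo those steps in reverse order on failure.
 */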
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

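/*
 * Translate an ib_send_wr into the rxe_send_wr stored in the WQE.
 * UD-style QPs (UD/SMI/GSI) carry the remote QPN/Q_Key; connected QPs
 * copy the opcode-specific RDMA, atomic or registration fields.
 */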
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
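			/* fall through - RDMA write with immediate also needs addr/rkey */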
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey	= rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (void __user *)
					    (uintptr_t)sge->addr, sge->length))
				return -EFAULT;
			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else {
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	}

	wqe->iova		= (mask & WR_ATOMIC_MASK) ?
					atomic_wr(ibwr)->remote_addr :
					rdma_wr(ibwr)->remote_addr;
	wqe->mask		= mask;
	wqe->dma.length		= length;
	wqe->dma.resid		= length;
	wqe->dma.num_sge	= num_sge;
	wqe->dma.cur_sge	= 0;
	wqe->dma.sge_offset	= 0;
	wqe->state		= wqe_state_posted;
	wqe->ssn		= atomic_add_return(1, &qp->ssn);

	return 0;
}

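/*
 * Build and post a single send WQE.  The slot is reserved and filled
 * under sq_lock, and smp_wmb() orders the WQE writes before
 * advance_producer() makes the slot visible to the requester task.
 */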
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	int must_sched;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the GSI QP case because ib_send_mad() holds an
	 * irq lock and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh().
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}

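/*
 * For kernel QPs the WQEs are built here from the ib_send_wr chain.  For
 * user QPs the provider library is assumed to have written the WQEs into
 * the mmap'ed send queue already, so this entry point only has to kick
 * the requester task.
 */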
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	}

	return rxe_post_send_kernel(qp, wr, bad_wr);
}

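/*
 * Receive WRs are only accepted on the QP's own receive queue; QPs that
 * were created with an SRQ must post through rxe_post_srq_recv() instead.
 */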
static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

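/*
 * rxe_set_page() is the callback handed to ib_sg_to_pages() by
 * rxe_map_mr_sg(); each call records one page address in the MR's
 * map/buf tables used later for fast registration.
 */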
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};

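/*
 * Fill in the ib_device fields and the verb callbacks defined above,
 * register the device with the IB core and create the 'parent' sysfs
 * attribute that reports the underlying network device.
 */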
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe_node_guid(rxe);
	dev->dev.dma_ops = &dma_virt_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;

	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}