/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

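/*
 * rxe_srq_chk_attr - validate SRQ attributes against device limits.
 *
 * Used both at SRQ create time (mask == IB_SRQ_INIT_MASK) and on
 * modify.  Rejects values above the rxe device caps, clamps max_wr
 * and max_sge up to the driver minimums, and refuses to shrink the
 * queue below an armed limit.  Returns 0 on success, -EINVAL on any
 * violation.
 */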
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
{
	if (srq && srq->error) {
		pr_warn("srq in error state\n");
		goto err1;
	}

	if (mask & IB_SRQ_MAX_WR) {
		if (attr->max_wr > rxe->attr.max_srq_wr) {
			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
				attr->max_wr, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (attr->max_wr <= 0) {
			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
			goto err1;
		}

		if (srq && srq->limit && (attr->max_wr < srq->limit)) {
			pr_warn("max_wr(%d) < srq->limit(%d)\n",
				attr->max_wr, srq->limit);
			goto err1;
		}

		if (attr->max_wr < RXE_MIN_SRQ_WR)
			attr->max_wr = RXE_MIN_SRQ_WR;
	}

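	/*
	 * srq_limit is a watermark: once the number of receive WRs in
	 * the SRQ drops below it, an IB_EVENT_SRQ_LIMIT_REACHED async
	 * event is generated.  It therefore must not exceed the device
	 * cap or the current queue capacity.
	 */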
	if (mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit > rxe->attr.max_srq_wr) {
			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
				attr->srq_limit, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
			pr_warn("srq_limit(%d) > cur limit(%d)\n",
				attr->srq_limit,
				srq->rq.queue->buf->index_mask);
			goto err1;
		}
	}

	if (mask == IB_SRQ_INIT_MASK) {
		if (attr->max_sge > rxe->attr.max_srq_sge) {
			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
				attr->max_sge, rxe->attr.max_srq_sge);
			goto err1;
		}

		if (attr->max_sge < RXE_MIN_SRQ_SGE)
			attr->max_sge = RXE_MIN_SRQ_SGE;
	}

	return 0;

err1:
	return -EINVAL;
}

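/*
 * rxe_srq_from_init - fill in a new rxe_srq from ib_srq_init_attr.
 *
 * Allocates the receive queue and, for user-space SRQs, sets up the
 * mmap info so the consumer can map the queue buffer and copies the
 * SRQ number back through udata.  Attributes are assumed to have
 * already passed rxe_srq_chk_attr().
 */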
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int srq_wqe_size;
	struct rxe_queue *q;

	srq->ibsrq.event_handler	= init->event_handler;
	srq->ibsrq.srq_context		= init->srq_context;
	srq->limit		= init->attr.srq_limit;
	srq->srq_num		= srq->pelem.index;
	srq->rq.max_wr		= init->attr.max_wr;
	srq->rq.max_sge		= init->attr.max_sge;

	srq_wqe_size		= rcv_wqe_size(srq->rq.max_sge);

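	/*
	 * Separate producer and consumer locks let receive WRs be
	 * posted concurrently with WQEs being consumed on the
	 * completion path.
	 */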
	spin_lock_init(&srq->rq.producer_lock);
	spin_lock_init(&srq->rq.consumer_lock);

	q = rxe_queue_init(rxe, &srq->rq.max_wr,
			   srq_wqe_size);
	if (!q) {
		pr_warn("unable to allocate queue for srq\n");
		return -ENOMEM;
	}

	srq->rq.queue = q;

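	/*
	 * For a user-space SRQ, the response written to udata->outbuf
	 * is a struct mminfo describing how to mmap the queue buffer,
	 * followed by the 32-bit SRQ number.
	 */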
	err = do_mmap_info(rxe, udata, false, context, q->buf,
			   q->buf_size, &q->ip);
	if (err) {
		/* don't leak the queue if mmap info setup fails */
		rxe_queue_cleanup(q);
		srq->rq.queue = NULL;
		return err;
	}

	if (udata && udata->outlen >= sizeof(struct mminfo) + sizeof(u32)) {
		if (copy_to_user(udata->outbuf + sizeof(struct mminfo),
				 &srq->srq_num, sizeof(u32))) {
			rxe_queue_cleanup(q);
			srq->rq.queue = NULL;
			return -EFAULT;
		}
	}
	return 0;
}

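/*
 * rxe_srq_from_attr - apply modify-SRQ attributes.
 *
 * On IB_SRQ_MAX_WR this resizes the receive queue.  For user-space
 * SRQs, udata carries the address of a struct mminfo in user memory;
 * the new queue mapping info is written back there so the consumer
 * can remap the resized buffer.  On IB_SRQ_LIMIT the limit watermark
 * is rearmed.
 */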
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct ib_udata *udata)
{
	int err;
	struct rxe_queue *q = srq->rq.queue;
	struct mminfo mi = { .offset = 1, .size = 0};

	if (mask & IB_SRQ_MAX_WR) {
		/*
		 * If present, fetch the user-space address of the
		 * mminfo struct from udata and check that we will be
		 * able to write the new mapping info there.
		 */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 mi_addr;

			err = ib_copy_from_udata(&mi_addr, udata,
						 sizeof(mi_addr));
			if (err)
				goto err1;

			udata->outbuf = (void __user *)(unsigned long)mi_addr;
			udata->outlen = sizeof(mi);

			if (!access_ok(VERIFY_WRITE,
				       (void __user *)udata->outbuf,
				       udata->outlen)) {
				err = -EFAULT;
				goto err1;
			}
		}

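		/*
		 * Resize the queue in place; rxe_queue_resize() copies
		 * the existing WQEs into the new buffer while holding
		 * both the producer and consumer locks.
		 */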
		err = rxe_queue_resize(q, &attr->max_wr,
				       rcv_wqe_size(srq->rq.max_sge),
				       srq->rq.queue->ip ?
						srq->rq.queue->ip->context :
						NULL,
				       udata, &srq->rq.producer_lock,
				       &srq->rq.consumer_lock);
		if (err)
			goto err2;
	}

	if (mask & IB_SRQ_LIMIT)
		srq->limit = attr->srq_limit;

	return 0;

err2:
	rxe_queue_cleanup(q);
	srq->rq.queue = NULL;
err1:
	return err;
}