// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_ib.h"
#include "srq.h"

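/*
 * Number of bytes needed for the PAS (physical address structure) array
 * that maps the SRQ buffer: the ring size (a 16-byte-unit stride shifted
 * by wqe_shift, times 2^log_size entries) plus the page offset, which is
 * counted in 1/64-page quanta, rounded up to whole pages at eight bytes
 * per page address.
 */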
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset   = in->page_offset;
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);

	return rq_num_pas * sizeof(u64);
}

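/*
 * Fill a firmware WQ context from the generic SRQ attributes.  Note the
 * bias on log_wq_stride: the WQ context expresses the stride relative to
 * a 16-byte basic unit, while mlx5_srq_attr carries the raw wqe_shift.
 */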
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
	MLX5_SET(wq,   wq, lwm,           in->lwm);
	MLX5_SET(wq,   wq, pd,            in->pd);
	MLX5_SET64(wq, wq, dbr_addr,      in->db_record);
}

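/*
 * Fill a legacy SRQ context.  Unlike the WQ context above, log_rq_stride
 * takes the raw wqe_shift with no 16-byte bias.
 */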
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc,   srqc, wq_signature,
		 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
	MLX5_SET(srqc,   srqc, lwm,           in->lwm);
	MLX5_SET(srqc,   srqc, pd,            in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
	MLX5_SET(srqc,   srqc, xrcd,          in->xrcd);
	MLX5_SET(srqc,   srqc, cqn,           in->cqn);
}

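/*
 * Inverse of set_wq(): decode a firmware WQ context into generic SRQ
 * attributes, undoing the 16-byte stride bias.
 */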
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
	in->wqe_shift     = MLX5_GET(wq,   wq, log_wq_stride) - 4;
	in->log_size      = MLX5_GET(wq,   wq, log_wq_sz);
	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
	in->lwm           = MLX5_GET(wq,   wq, lwm);
	in->pd            = MLX5_GET(wq,   wq, pd);
	in->db_record     = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
	in->wqe_shift     = MLX5_GET(srqc,   srqc, log_rq_stride);
	in->log_size      = MLX5_GET(srqc,   srqc, log_srq_size);
	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
	in->lwm           = MLX5_GET(srqc,   srqc, lwm);
	in->pd            = MLX5_GET(srqc,   srqc, pd);
	in->db_record     = MLX5_GET64(srqc, srqc, dbr_addr);
}

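/*
 * Look up an SRQ by number and take a reference on it under the table
 * lock.  Callers drop the reference with atomic_dec_and_test() and
 * complete(&srq->free) on the final put.
 */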
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	return srq;
}

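/*
 * The create commands below share a pattern: the input is variable length
 * (a fixed context followed by the PAS array), so it is allocated with
 * kvzalloc(), while fixed-size buffers live on the stack.
 */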
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_srq_in, create_in, uid, in->uid);
	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

	MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
	MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			     sizeof(srq_out));
}

static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};

	MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
	MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			     sizeof(srq_out));
}

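/*
 * The query output is sized for the full firmware layout, which is too
 * large to comfortably sit on the stack, so it is heap-allocated.
 */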
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
	u32 *srq_out;
	void *srqc;
	int err;

	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
	err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			    MLX5_ST_SZ_BYTES(query_srq_out));
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
out:
	kvfree(srq_out);
	return err;
}

static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
				xrc_srq_context_entry);
	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	memcpy(pas, in->pas, pas_size);
	MLX5_SET(create_xrc_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_XRC_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	if (err)
		goto out;

	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
	srq->uid = in->uid;
out:
	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			   u16 lwm)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			    xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

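/*
 * When the device runs a non-zero ISSI, a plain SRQ is backed by an RMP
 * (receive memory pool) object whose context embeds a standard WQ context.
 */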
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_out = NULL;
	void *create_in = NULL;
	void *rmpc;
	void *wq;
	int pas_size;
	int outlen;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
	create_in = kvzalloc(inlen, GFP_KERNEL);
	create_out = kvzalloc(outlen, GFP_KERNEL);
	if (!create_in || !create_out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
	if (!err) {
		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
		srq->uid = in->uid;
	}

out:
	kvfree(create_in);
	kvfree(create_out);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *out = NULL;
	void *in = NULL;
	void *rmpc;
	void *wq;
	void *bitmask;
	int outlen;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

	in = kvzalloc(inlen, GFP_KERNEL);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc    = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
	bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
	wq      = MLX5_ADDR_OF(rmpc, rmpc, wq);

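	/* Only the LWM changes; mark exactly that field in the bitmask. */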
	MLX5_SET(modify_rmp_in, in,      rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in,      rmpn,      srq->srqn);
	MLX5_SET(modify_rmp_in, in,      uid,       srq->uid);
	MLX5_SET(wq,            wq,      lwm,       lwm);
	MLX5_SET(rmp_bitmask,   bitmask, lwm,       1);
	MLX5_SET(rmpc,          rmpc,    state,     MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in,      opcode,    MLX5_CMD_OP_MODIFY_RMP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);

out:
	kvfree(in);
	kvfree(out);
	return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out = NULL;
	u32 *rmp_in = NULL;
	void *rmpc;
	int outlen;
	int inlen;
	int err;

	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

	rmp_out = kvzalloc(outlen, GFP_KERNEL);
	rmp_in = kvzalloc(inlen, GFP_KERNEL);
	if (!rmp_out || !rmp_in) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, rmp_in, rmpn,   srq->srqn);
	err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	kvfree(rmp_in);
	return err;
}

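/*
 * Tag-matching SRQs (IB_SRQT_TM) are backed by XRQ objects; the
 * tag-matching topology and the optional rendezvous offload are programmed
 * into the XRQ context at creation time.
 */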
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);

	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);

	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

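/* XRQs are armed through the generic ARM_RQ command with the XRQ op_mod. */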
static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
	int err;

	xrq_out = kvzalloc(outlen, GFP_KERNEL);
	if (!xrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
	if (err)
		goto out;

	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
	out->tm_next_tag =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.append_next_index);
	out->tm_hw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.hw_phase_cnt);
	out->tm_sw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.sw_phase_cnt);

out:
	kvfree(xrq_out);
	return err;
}

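/*
 * ISSI 0 firmware exposes native SRQ commands.  On devices running a
 * newer command interface (non-zero ISSI), the SRQ types are mapped onto
 * other objects: XRC SRQs, XRQs for tag matching, and RMPs for plain SRQs.
 */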
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->mdev->issi)
		return create_srq_cmd(dev, srq, in);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return create_xrc_srq_cmd(dev, srq, in);
	case MLX5_RES_XRQ:
		return create_xrq_cmd(dev, srq, in);
	default:
		return create_rmp_cmd(dev, srq, in);
	}
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->mdev->issi)
		return destroy_srq_cmd(dev, srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return destroy_xrc_srq_cmd(dev, srq);
	case MLX5_RES_XRQ:
		return destroy_xrq_cmd(dev, srq);
	default:
		return destroy_rmp_cmd(dev, srq);
	}
}

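/*
 * Create the firmware object for the SRQ and publish it in the radix tree
 * with an initial reference, which makes it visible to the event handler
 * and to mlx5_cmd_get_srq().
 */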
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *in)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	int err;

	switch (in->type) {
	case IB_SRQT_XRC:
		srq->common.res = MLX5_RES_XSRQ;
		break;
	case IB_SRQT_TM:
		srq->common.res = MLX5_RES_XRQ;
		break;
	default:
		srq->common.res = MLX5_RES_SRQ;
	}

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err)
		goto err_destroy_srq_split;

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}

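/*
 * Removal from the radix tree comes first so no new references can be
 * taken, then the firmware object is destroyed; the final wait lets any
 * outstanding reference holders (such as the event notifier) finish.
 */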
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (tmp != srq)
		return -EINVAL;

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       struct mlx5_srq_attr *out)
{
	if (!dev->mdev->issi)
		return query_srq_cmd(dev, srq, out);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return query_xrc_srq_cmd(dev, srq, out);
	case MLX5_RES_XRQ:
		return query_xrq_cmd(dev, srq, out);
	default:
		return query_rmp_cmd(dev, srq, out);
	}
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		     u16 lwm, int is_srq)
{
	if (!dev->mdev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return arm_xrc_srq_cmd(dev, srq, lwm);
	case MLX5_RES_XRQ:
		return arm_xrq_cmd(dev, srq, lwm);
	default:
		return arm_rmp_cmd(dev, srq, lwm);
	}
}

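/*
 * Async event handler for SRQ catastrophic-error and limit events.  A
 * temporary reference is held around the ->event() callback so the SRQ
 * cannot be freed while the callback runs.
 */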
static int srq_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_srq_table *table;
	struct mlx5_core_srq *srq;
	struct mlx5_eqe *eqe;
	u32 srqn;

	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
		return NOTIFY_DONE;

	table = container_of(nb, struct mlx5_srq_table, nb);

	eqe = data;
	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	if (!srq)
		return NOTIFY_OK;

	srq->event(srq, eqe->type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);

	return NOTIFY_OK;
}

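/*
 * Insertions into the radix tree happen under a spinlock, hence
 * GFP_ATOMIC for the tree's internal allocations.
 */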
int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

	table->nb.notifier_call = srq_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
}