xref: /openbmc/linux/drivers/infiniband/hw/mlx5/qp.c (revision 86edee97)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_cache.h>
36 #include <rdma/ib_user_verbs.h>
37 #include <rdma/rdma_counter.h>
38 #include <linux/mlx5/fs.h>
39 #include "mlx5_ib.h"
40 #include "ib_rep.h"
41 #include "cmd.h"
42 
43 /* not supported currently */
44 static int wq_signature;
45 
46 enum {
47 	MLX5_IB_ACK_REQ_FREQ	= 8,
48 };
49 
50 enum {
51 	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
52 	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
53 	MLX5_IB_LINK_TYPE_IB		= 0,
54 	MLX5_IB_LINK_TYPE_ETH		= 1
55 };
56 
57 enum {
58 	MLX5_IB_SQ_STRIDE	= 6,
59 	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
60 };
61 
62 static const u32 mlx5_ib_opcode[] = {
63 	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
64 	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
65 	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
66 	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
67 	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
68 	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
69 	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
70 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
71 	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
72 	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
73 	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
74 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
75 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
76 	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
77 };
78 
79 struct mlx5_wqe_eth_pad {
80 	u8 rsvd0[16];
81 };
82 
83 enum raw_qp_set_mask_map {
84 	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
85 	MLX5_RAW_QP_RATE_LIMIT			= 1UL << 1,
86 };
87 
88 struct mlx5_modify_raw_qp_param {
89 	u16 operation;
90 
91 	u32 set_mask; /* raw_qp_set_mask_map */
92 
93 	struct mlx5_rate_limit rl;
94 
95 	u8 rq_q_ctr_id;
96 	u16 port;
97 };
98 
99 static void get_cqs(enum ib_qp_type qp_type,
100 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
101 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
102 
103 static int is_qp0(enum ib_qp_type qp_type)
104 {
105 	return qp_type == IB_QPT_SMI;
106 }
107 
108 static int is_sqp(enum ib_qp_type qp_type)
109 {
110 	return is_qp0(qp_type) || is_qp1(qp_type);
111 }
112 
113 /**
114  * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of it) from a user WQ
115  * to a kernel buffer
116  *
117  * @umem: User space memory where the WQ is
118  * @buffer: buffer to copy to
119  * @buflen: buffer length
120  * @wqe_index: index of WQE to copy from
121  * @wq_offset: offset to start of WQ
122  * @wq_wqe_cnt: number of WQEs in WQ
123  * @wq_wqe_shift: log2 of WQE size
124  * @bcnt: number of bytes to copy
125  * @bytes_copied: number of bytes actually copied (output)
126  *
127  * Copies at most bcnt bytes, starting from the beginning of the WQE.
128  * It is not guaranteed to copy the entire WQE.
129  *
130  * Return: zero on success, or an error code.
131  */
132 static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
133 					size_t buflen, int wqe_index,
134 					int wq_offset, int wq_wqe_cnt,
135 					int wq_wqe_shift, int bcnt,
136 					size_t *bytes_copied)
137 {
138 	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
139 	size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
140 	size_t copy_length;
141 	int ret;
142 
143 	/* don't copy more than requested, more than the buffer length, or
144 	 * beyond the WQ end
145 	 */
146 	copy_length = min_t(u32, buflen, wq_end - offset);
147 	copy_length = min_t(u32, copy_length, bcnt);
148 
149 	ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
150 	if (ret)
151 		return ret;
152 
153 	if (bytes_copied)
154 		*bytes_copied = copy_length;
155 
156 	return 0;
157 }
158 
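/*
 * Read a single WQE from a kernel-owned SQ into @buffer.
 *
 * The control segment is read first to learn the WQE length (the DS field
 * counts 16-byte units); the WQE is then copied stride by stride in
 * MLX5_SEND_WQE_BB basic blocks, wrapping around the fragmented SQ buffer
 * if needed.  At most @buflen bytes are copied and the number of bytes
 * copied is returned in @bc.
 */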
159 static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
160 				      void *buffer, size_t buflen, size_t *bc)
161 {
162 	struct mlx5_wqe_ctrl_seg *ctrl;
163 	size_t bytes_copied = 0;
164 	size_t wqe_length;
165 	void *p;
166 	int ds;
167 
168 	wqe_index = wqe_index & qp->sq.fbc.sz_m1;
169 
170 	/* read the control segment first */
171 	p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
172 	ctrl = p;
173 	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
174 	wqe_length = ds * MLX5_WQE_DS_UNITS;
175 
176 	/* read the rest of the WQE if it spans more than one stride */
177 	while (bytes_copied < wqe_length) {
178 		size_t copy_length =
179 			min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
180 
181 		if (!copy_length)
182 			break;
183 
184 		memcpy(buffer + bytes_copied, p, copy_length);
185 		bytes_copied += copy_length;
186 
187 		wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
188 		p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
189 	}
190 	*bc = bytes_copied;
191 	return 0;
192 }
193 
194 static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
195 				    void *buffer, size_t buflen, size_t *bc)
196 {
197 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
198 	struct ib_umem *umem = base->ubuffer.umem;
199 	struct mlx5_ib_wq *wq = &qp->sq;
200 	struct mlx5_wqe_ctrl_seg *ctrl;
201 	size_t bytes_copied;
202 	size_t bytes_copied2;
203 	size_t wqe_length;
204 	int ret;
205 	int ds;
206 
207 	/* first, read as much as possible */
208 	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
209 					   wq->offset, wq->wqe_cnt,
210 					   wq->wqe_shift, buflen,
211 					   &bytes_copied);
212 	if (ret)
213 		return ret;
214 
215 	/* we need at least the control segment size to proceed */
216 	if (bytes_copied < sizeof(*ctrl))
217 		return -EINVAL;
218 
219 	ctrl = buffer;
220 	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
221 	wqe_length = ds * MLX5_WQE_DS_UNITS;
222 
223 	/* if we copied enough then we are done */
224 	if (bytes_copied >= wqe_length) {
225 		*bc = bytes_copied;
226 		return 0;
227 	}
228 
229 	/* otherwise this is a wrapped-around WQE,
230 	 * so read the remaining bytes starting
231 	 * from wqe_index 0
232 	 */
233 	ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
234 					   buflen - bytes_copied, 0, wq->offset,
235 					   wq->wqe_cnt, wq->wqe_shift,
236 					   wqe_length - bytes_copied,
237 					   &bytes_copied2);
238 
239 	if (ret)
240 		return ret;
241 	*bc = bytes_copied + bytes_copied2;
242 	return 0;
243 }
244 
245 int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
246 			size_t buflen, size_t *bc)
247 {
248 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
249 	struct ib_umem *umem = base->ubuffer.umem;
250 
251 	if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
252 		return -EINVAL;
253 
254 	if (!umem)
255 		return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
256 						  buflen, bc);
257 
258 	return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
259 }
260 
261 static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
262 				    void *buffer, size_t buflen, size_t *bc)
263 {
264 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
265 	struct ib_umem *umem = base->ubuffer.umem;
266 	struct mlx5_ib_wq *wq = &qp->rq;
267 	size_t bytes_copied;
268 	int ret;
269 
270 	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
271 					   wq->offset, wq->wqe_cnt,
272 					   wq->wqe_shift, buflen,
273 					   &bytes_copied);
274 
275 	if (ret)
276 		return ret;
277 	*bc = bytes_copied;
278 	return 0;
279 }
280 
281 int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
282 			size_t buflen, size_t *bc)
283 {
284 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
285 	struct ib_umem *umem = base->ubuffer.umem;
286 	struct mlx5_ib_wq *wq = &qp->rq;
287 	size_t wqe_size = 1 << wq->wqe_shift;
288 
289 	if (buflen < wqe_size)
290 		return -EINVAL;
291 
292 	if (!umem)
293 		return -EOPNOTSUPP;
294 
295 	return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
296 }
297 
298 static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
299 				     void *buffer, size_t buflen, size_t *bc)
300 {
301 	struct ib_umem *umem = srq->umem;
302 	size_t bytes_copied;
303 	int ret;
304 
305 	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
306 					   srq->msrq.max, srq->msrq.wqe_shift,
307 					   buflen, &bytes_copied);
308 
309 	if (ret)
310 		return ret;
311 	*bc = bytes_copied;
312 	return 0;
313 }
314 
315 int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
316 			 size_t buflen, size_t *bc)
317 {
318 	struct ib_umem *umem = srq->umem;
319 	size_t wqe_size = 1 << srq->msrq.wqe_shift;
320 
321 	if (buflen < wqe_size)
322 		return -EINVAL;
323 
324 	if (!umem)
325 		return -EOPNOTSUPP;
326 
327 	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
328 }
329 
330 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
331 {
332 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
333 	struct ib_event event;
334 
335 	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
336 		/* This event is only valid for trans_qps */
337 		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
338 	}
339 
340 	if (ibqp->event_handler) {
341 		event.device     = ibqp->device;
342 		event.element.qp = ibqp;
343 		switch (type) {
344 		case MLX5_EVENT_TYPE_PATH_MIG:
345 			event.event = IB_EVENT_PATH_MIG;
346 			break;
347 		case MLX5_EVENT_TYPE_COMM_EST:
348 			event.event = IB_EVENT_COMM_EST;
349 			break;
350 		case MLX5_EVENT_TYPE_SQ_DRAINED:
351 			event.event = IB_EVENT_SQ_DRAINED;
352 			break;
353 		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
354 			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
355 			break;
356 		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
357 			event.event = IB_EVENT_QP_FATAL;
358 			break;
359 		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
360 			event.event = IB_EVENT_PATH_MIG_ERR;
361 			break;
362 		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
363 			event.event = IB_EVENT_QP_REQ_ERR;
364 			break;
365 		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
366 			event.event = IB_EVENT_QP_ACCESS_ERR;
367 			break;
368 		default:
369 			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
370 			return;
371 		}
372 
373 		ibqp->event_handler(&event, ibqp->qp_context);
374 	}
375 }
376 
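/*
 * Work out the receive queue geometry (wqe_cnt, wqe_shift, max_gs).
 *
 * For QPs without an RQ everything is cleared.  For user QPs the geometry
 * comes from the user command (after validation); for kernel QPs it is
 * derived from the requested caps: one data segment per SGE plus an
 * optional signature segment, rounded up to a power of two.
 */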
377 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
378 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
379 {
380 	int wqe_size;
381 	int wq_size;
382 
383 	/* Sanity check RQ size before proceeding */
384 	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
385 		return -EINVAL;
386 
387 	if (!has_rq) {
388 		qp->rq.max_gs = 0;
389 		qp->rq.wqe_cnt = 0;
390 		qp->rq.wqe_shift = 0;
391 		cap->max_recv_wr = 0;
392 		cap->max_recv_sge = 0;
393 	} else {
394 		if (ucmd) {
395 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
396 			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
397 				return -EINVAL;
398 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
399 			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
400 				return -EINVAL;
401 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
402 			qp->rq.max_post = qp->rq.wqe_cnt;
403 		} else {
404 			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
405 			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
406 			wqe_size = roundup_pow_of_two(wqe_size);
407 			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
408 			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
409 			qp->rq.wqe_cnt = wq_size / wqe_size;
410 			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
411 				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
412 					    wqe_size,
413 					    MLX5_CAP_GEN(dev->mdev,
414 							 max_wqe_sz_rq));
415 				return -EINVAL;
416 			}
417 			qp->rq.wqe_shift = ilog2(wqe_size);
418 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
419 			qp->rq.max_post = qp->rq.wqe_cnt;
420 		}
421 	}
422 
423 	return 0;
424 }
425 
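/*
 * Per-WQE overhead in bytes for the given QP type: the control segment
 * plus the largest combination of transport-specific segments a single
 * WQE of this type may carry (e.g. for RC, either atomic + raddr or the
 * UMR control + mkey segments plus a small reservation for inline UMR
 * data).  XRC_TGT has no SQ and therefore no overhead; unsupported types
 * return -EINVAL.
 */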
426 static int sq_overhead(struct ib_qp_init_attr *attr)
427 {
428 	int size = 0;
429 
430 	switch (attr->qp_type) {
431 	case IB_QPT_XRC_INI:
432 		size += sizeof(struct mlx5_wqe_xrc_seg);
433 		/* fall through */
434 	case IB_QPT_RC:
435 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
436 			max(sizeof(struct mlx5_wqe_atomic_seg) +
437 			    sizeof(struct mlx5_wqe_raddr_seg),
438 			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
439 			    sizeof(struct mlx5_mkey_seg) +
440 			    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
441 			    MLX5_IB_UMR_OCTOWORD);
442 		break;
443 
444 	case IB_QPT_XRC_TGT:
445 		return 0;
446 
447 	case IB_QPT_UC:
448 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
449 			max(sizeof(struct mlx5_wqe_raddr_seg),
450 			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
451 			    sizeof(struct mlx5_mkey_seg));
452 		break;
453 
454 	case IB_QPT_UD:
455 		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
456 			size += sizeof(struct mlx5_wqe_eth_pad) +
457 				sizeof(struct mlx5_wqe_eth_seg);
458 		/* fall through */
459 	case IB_QPT_SMI:
460 	case MLX5_IB_QPT_HW_GSI:
461 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
462 			sizeof(struct mlx5_wqe_datagram_seg);
463 		break;
464 
465 	case MLX5_IB_QPT_REG_UMR:
466 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
467 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
468 			sizeof(struct mlx5_mkey_seg);
469 		break;
470 
471 	default:
472 		return -EINVAL;
473 	}
474 
475 	return size;
476 }
477 
478 static int calc_send_wqe(struct ib_qp_init_attr *attr)
479 {
480 	int inl_size = 0;
481 	int size;
482 
483 	size = sq_overhead(attr);
484 	if (size < 0)
485 		return size;
486 
487 	if (attr->cap.max_inline_data) {
488 		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
489 			attr->cap.max_inline_data;
490 	}
491 
492 	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
493 	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
494 	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
495 		return MLX5_SIG_WQE_SIZE;
496 	else
497 		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
498 }
499 
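/*
 * Maximum number of scatter/gather entries that fit in a send WQE of
 * wqe_size bytes once the per-QP-type overhead is subtracted.  For RC and
 * XRC_INI the calculation is capped at the first 512 bytes of the WQE.
 */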
500 static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
501 {
502 	int max_sge;
503 
504 	if (attr->qp_type == IB_QPT_RC)
505 		max_sge = (min_t(int, wqe_size, 512) -
506 			   sizeof(struct mlx5_wqe_ctrl_seg) -
507 			   sizeof(struct mlx5_wqe_raddr_seg)) /
508 			sizeof(struct mlx5_wqe_data_seg);
509 	else if (attr->qp_type == IB_QPT_XRC_INI)
510 		max_sge = (min_t(int, wqe_size, 512) -
511 			   sizeof(struct mlx5_wqe_ctrl_seg) -
512 			   sizeof(struct mlx5_wqe_xrc_seg) -
513 			   sizeof(struct mlx5_wqe_raddr_seg)) /
514 			sizeof(struct mlx5_wqe_data_seg);
515 	else
516 		max_sge = (wqe_size - sq_overhead(attr)) /
517 			sizeof(struct mlx5_wqe_data_seg);
518 
519 	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
520 		     sizeof(struct mlx5_wqe_data_seg));
521 }
522 
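/*
 * Derive the send queue geometry from the requested caps.
 *
 * The WQE size comes from calc_send_wqe(); the total SQ size is
 * max_send_wr * wqe_size rounded up to a power of two and expressed in
 * MLX5_SEND_WQE_BB (64 byte) basic blocks.  The result is validated
 * against max_wqe_sz_sq and log_max_qp_sz, qp->sq and the returned caps
 * are updated, and the SQ buffer size in bytes is returned.
 */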
523 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
524 			struct mlx5_ib_qp *qp)
525 {
526 	int wqe_size;
527 	int wq_size;
528 
529 	if (!attr->cap.max_send_wr)
530 		return 0;
531 
532 	wqe_size = calc_send_wqe(attr);
533 	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
534 	if (wqe_size < 0)
535 		return wqe_size;
536 
537 	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
538 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
539 			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
540 		return -EINVAL;
541 	}
542 
543 	qp->max_inline_data = wqe_size - sq_overhead(attr) -
544 			      sizeof(struct mlx5_wqe_inline_seg);
545 	attr->cap.max_inline_data = qp->max_inline_data;
546 
547 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
548 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
549 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
550 		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
551 			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
552 			    qp->sq.wqe_cnt,
553 			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
554 		return -ENOMEM;
555 	}
556 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
557 	qp->sq.max_gs = get_send_sge(attr, wqe_size);
558 	if (qp->sq.max_gs < attr->cap.max_send_sge)
559 		return -ENOMEM;
560 
561 	attr->cap.max_send_sge = qp->sq.max_gs;
562 	qp->sq.max_post = wq_size / wqe_size;
563 	attr->cap.max_send_wr = qp->sq.max_post;
564 
565 	return wq_size;
566 }
567 
568 static int set_user_buf_size(struct mlx5_ib_dev *dev,
569 			    struct mlx5_ib_qp *qp,
570 			    struct mlx5_ib_create_qp *ucmd,
571 			    struct mlx5_ib_qp_base *base,
572 			    struct ib_qp_init_attr *attr)
573 {
574 	int desc_sz = 1 << qp->sq.wqe_shift;
575 
576 	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
577 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
578 			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
579 		return -EINVAL;
580 	}
581 
582 	if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
583 		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
584 			     ucmd->sq_wqe_count);
585 		return -EINVAL;
586 	}
587 
588 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
589 
590 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
591 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
592 			     qp->sq.wqe_cnt,
593 			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
594 		return -EINVAL;
595 	}
596 
597 	if (attr->qp_type == IB_QPT_RAW_PACKET ||
598 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
599 		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
600 		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
601 	} else {
602 		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
603 					 (qp->sq.wqe_cnt << 6);
604 	}
605 
606 	return 0;
607 }
608 
609 static int qp_has_rq(struct ib_qp_init_attr *attr)
610 {
611 	if (attr->qp_type == IB_QPT_XRC_INI ||
612 	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
613 	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
614 	    !attr->cap.max_recv_wr)
615 		return 0;
616 
617 	return 1;
618 }
619 
620 enum {
621 	/* this is the first blue flame register in the array of bfregs assigned
622 	 * to a process. Since we do not use it for blue flame but rather
623 	 * regular 64-bit doorbells, we do not need a lock for maintaining
624 	 * "odd/even" order
625 	 */
626 	NUM_NON_BLUE_FLAME_BFREGS = 1,
627 };
628 
629 static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
630 {
631 	return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
632 }
633 
634 static int num_med_bfreg(struct mlx5_ib_dev *dev,
635 			 struct mlx5_bfreg_info *bfregi)
636 {
637 	int n;
638 
639 	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
640 	    NUM_NON_BLUE_FLAME_BFREGS;
641 
642 	return n >= 0 ? n : 0;
643 }
644 
645 static int first_med_bfreg(struct mlx5_ib_dev *dev,
646 			   struct mlx5_bfreg_info *bfregi)
647 {
648 	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
649 }
650 
651 static int first_hi_bfreg(struct mlx5_ib_dev *dev,
652 			  struct mlx5_bfreg_info *bfregi)
653 {
654 	int med;
655 
656 	med = num_med_bfreg(dev, bfregi);
657 	return ++med;
658 }
659 
660 static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
661 				  struct mlx5_bfreg_info *bfregi)
662 {
663 	int i;
664 
665 	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
666 		if (!bfregi->count[i]) {
667 			bfregi->count[i]++;
668 			return i;
669 		}
670 	}
671 
672 	return -ENOMEM;
673 }
674 
675 static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
676 				 struct mlx5_bfreg_info *bfregi)
677 {
678 	int minidx = first_med_bfreg(dev, bfregi);
679 	int i;
680 
681 	if (minidx < 0)
682 		return minidx;
683 
684 	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
685 		if (bfregi->count[i] < bfregi->count[minidx])
686 			minidx = i;
687 		if (!bfregi->count[minidx])
688 			break;
689 	}
690 
691 	bfregi->count[minidx]++;
692 	return minidx;
693 }
694 
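/*
 * Blue flame register allocation policy: for v2+ userspace, try a
 * dedicated high-class bfreg first (one with refcount 0), then a shared
 * medium-class one (lowest refcount).  Older userspace, or allocation
 * failure, falls back to bfreg 0, which is never used for blue flame
 * (only for regular doorbells) and may be shared by any number of QPs.
 */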
695 static int alloc_bfreg(struct mlx5_ib_dev *dev,
696 		       struct mlx5_bfreg_info *bfregi)
697 {
698 	int bfregn = -ENOMEM;
699 
700 	mutex_lock(&bfregi->lock);
701 	if (bfregi->ver >= 2) {
702 		bfregn = alloc_high_class_bfreg(dev, bfregi);
703 		if (bfregn < 0)
704 			bfregn = alloc_med_class_bfreg(dev, bfregi);
705 	}
706 
707 	if (bfregn < 0) {
708 		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
709 		bfregn = 0;
710 		bfregi->count[bfregn]++;
711 	}
712 	mutex_unlock(&bfregi->lock);
713 
714 	return bfregn;
715 }
716 
717 void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
718 {
719 	mutex_lock(&bfregi->lock);
720 	bfregi->count[bfregn]--;
721 	mutex_unlock(&bfregi->lock);
722 }
723 
724 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
725 {
726 	switch (state) {
727 	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
728 	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
729 	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
730 	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
731 	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
732 	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
733 	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
734 	default:		return -1;
735 	}
736 }
737 
738 static int to_mlx5_st(enum ib_qp_type type)
739 {
740 	switch (type) {
741 	case IB_QPT_RC:			return MLX5_QP_ST_RC;
742 	case IB_QPT_UC:			return MLX5_QP_ST_UC;
743 	case IB_QPT_UD:			return MLX5_QP_ST_UD;
744 	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
745 	case IB_QPT_XRC_INI:
746 	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
747 	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
748 	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
749 	case MLX5_IB_QPT_DCI:		return MLX5_QP_ST_DCI;
750 	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
751 	case IB_QPT_RAW_PACKET:
752 	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
753 	case IB_QPT_MAX:
754 	default:		return -EINVAL;
755 	}
756 }
757 
758 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
759 			     struct mlx5_ib_cq *recv_cq);
760 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
761 			       struct mlx5_ib_cq *recv_cq);
762 
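/*
 * Translate a bfreg number to the hardware UAR index it lives in:
 * bfregn / bfregs_per_sys_page selects the system page (dynamic bfregs
 * are numbered after the static pages), and the remainder, divided by
 * the number of non-fast-path bfregs per UAR, selects the UAR within
 * that page.  Returns a negative errno for an invalid dynamic bfreg.
 */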
763 int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
764 			struct mlx5_bfreg_info *bfregi, u32 bfregn,
765 			bool dyn_bfreg)
766 {
767 	unsigned int bfregs_per_sys_page;
768 	u32 index_of_sys_page;
769 	u32 offset;
770 
771 	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
772 				MLX5_NON_FP_BFREGS_PER_UAR;
773 	index_of_sys_page = bfregn / bfregs_per_sys_page;
774 
775 	if (dyn_bfreg) {
776 		index_of_sys_page += bfregi->num_static_sys_pages;
777 
778 		if (index_of_sys_page >= bfregi->num_sys_pages)
779 			return -EINVAL;
780 
781 		if (bfregn > bfregi->num_dyn_bfregs ||
782 		    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
783 			mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
784 			return -EINVAL;
785 		}
786 	}
787 
788 	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
789 	return bfregi->sys_pages[index_of_sys_page] + offset;
790 }
791 
792 static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
793 			    unsigned long addr, size_t size,
794 			    struct ib_umem **umem, int *npages, int *page_shift,
795 			    int *ncont, u32 *offset)
796 {
797 	int err;
798 
799 	*umem = ib_umem_get(&dev->ib_dev, addr, size, 0);
800 	if (IS_ERR(*umem)) {
801 		mlx5_ib_dbg(dev, "umem_get failed\n");
802 		return PTR_ERR(*umem);
803 	}
804 
805 	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
806 
807 	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
808 	if (err) {
809 		mlx5_ib_warn(dev, "bad offset\n");
810 		goto err_umem;
811 	}
812 
813 	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
814 		    addr, size, *npages, *page_shift, *ncont, *offset);
815 
816 	return 0;
817 
818 err_umem:
819 	ib_umem_release(*umem);
820 	*umem = NULL;
821 
822 	return err;
823 }
824 
825 static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
826 			    struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
827 {
828 	struct mlx5_ib_ucontext *context =
829 		rdma_udata_to_drv_context(
830 			udata,
831 			struct mlx5_ib_ucontext,
832 			ibucontext);
833 
834 	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
835 		atomic_dec(&dev->delay_drop.rqs_cnt);
836 
837 	mlx5_ib_db_unmap_user(context, &rwq->db);
838 	ib_umem_release(rwq->umem);
839 }
840 
841 static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
842 			  struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
843 			  struct mlx5_ib_create_wq *ucmd)
844 {
845 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
846 		udata, struct mlx5_ib_ucontext, ibucontext);
847 	int page_shift = 0;
848 	int npages;
849 	u32 offset = 0;
850 	int ncont = 0;
851 	int err;
852 
853 	if (!ucmd->buf_addr)
854 		return -EINVAL;
855 
856 	rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
857 	if (IS_ERR(rwq->umem)) {
858 		mlx5_ib_dbg(dev, "umem_get failed\n");
859 		err = PTR_ERR(rwq->umem);
860 		return err;
861 	}
862 
863 	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
864 			   &ncont, NULL);
865 	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
866 				     &rwq->rq_page_offset);
867 	if (err) {
868 		mlx5_ib_warn(dev, "bad offset\n");
869 		goto err_umem;
870 	}
871 
872 	rwq->rq_num_pas = ncont;
873 	rwq->page_shift = page_shift;
874 	rwq->log_page_size =  page_shift - MLX5_ADAPTER_PAGE_SHIFT;
875 	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
876 
877 	mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
878 		    (unsigned long long)ucmd->buf_addr, rwq->buf_size,
879 		    npages, page_shift, ncont, offset);
880 
881 	err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
882 	if (err) {
883 		mlx5_ib_dbg(dev, "map failed\n");
884 		goto err_umem;
885 	}
886 
887 	rwq->create_type = MLX5_WQ_USER;
888 	return 0;
889 
890 err_umem:
891 	ib_umem_release(rwq->umem);
892 	return err;
893 }
894 
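/*
 * alloc_bfreg() numbers only the non-fast-path bfregs; userspace sees an
 * index into the full bfreg space of each UAR (fast-path ones included),
 * so remap before reporting the index back.  As a sketch, assuming the
 * usual 2 non-fast-path bfregs out of 4 per UAR, bfregn 5 becomes
 * 2 * 4 + 1 = 9.
 */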
895 static int adjust_bfregn(struct mlx5_ib_dev *dev,
896 			 struct mlx5_bfreg_info *bfregi, int bfregn)
897 {
898 	return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
899 				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
900 }
901 
902 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
903 			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
904 			  struct ib_qp_init_attr *attr,
905 			  u32 **in,
906 			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
907 			  struct mlx5_ib_qp_base *base)
908 {
909 	struct mlx5_ib_ucontext *context;
910 	struct mlx5_ib_create_qp ucmd;
911 	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
912 	int page_shift = 0;
913 	int uar_index = 0;
914 	int npages;
915 	u32 offset = 0;
916 	int bfregn;
917 	int ncont = 0;
918 	__be64 *pas;
919 	void *qpc;
920 	int err;
921 	u16 uid;
922 
923 	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
924 	if (err) {
925 		mlx5_ib_dbg(dev, "copy failed\n");
926 		return err;
927 	}
928 
929 	context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
930 					    ibucontext);
931 	if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
932 		uar_index = bfregn_to_uar_index(dev, &context->bfregi,
933 						ucmd.bfreg_index, true);
934 		if (uar_index < 0)
935 			return uar_index;
936 
937 		bfregn = MLX5_IB_INVALID_BFREG;
938 	} else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
939 		/*
940 		 * TBD: should come from the verbs when we have the API
941 		 */
942 		/* In CROSS_CHANNEL CQ and QP must use the same UAR */
943 		bfregn = MLX5_CROSS_CHANNEL_BFREG;
944 	} else {
946 		bfregn = alloc_bfreg(dev, &context->bfregi);
947 		if (bfregn < 0)
948 			return bfregn;
949 	}
950 
951 	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
952 	if (bfregn != MLX5_IB_INVALID_BFREG)
953 		uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
954 						false);
955 
956 	qp->rq.offset = 0;
957 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
958 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
959 
960 	err = set_user_buf_size(dev, qp, &ucmd, base, attr);
961 	if (err)
962 		goto err_bfreg;
963 
964 	if (ucmd.buf_addr && ubuffer->buf_size) {
965 		ubuffer->buf_addr = ucmd.buf_addr;
966 		err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
967 				       ubuffer->buf_size, &ubuffer->umem,
968 				       &npages, &page_shift, &ncont, &offset);
969 		if (err)
970 			goto err_bfreg;
971 	} else {
972 		ubuffer->umem = NULL;
973 	}
974 
975 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
976 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
977 	*in = kvzalloc(*inlen, GFP_KERNEL);
978 	if (!*in) {
979 		err = -ENOMEM;
980 		goto err_umem;
981 	}
982 
983 	uid = (attr->qp_type != IB_QPT_XRC_TGT &&
984 	       attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
985 	MLX5_SET(create_qp_in, *in, uid, uid);
986 	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
987 	if (ubuffer->umem)
988 		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
989 
990 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
991 
992 	MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
993 	MLX5_SET(qpc, qpc, page_offset, offset);
994 
995 	MLX5_SET(qpc, qpc, uar_page, uar_index);
996 	if (bfregn != MLX5_IB_INVALID_BFREG)
997 		resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
998 	else
999 		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
1000 	qp->bfregn = bfregn;
1001 
1002 	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
1003 	if (err) {
1004 		mlx5_ib_dbg(dev, "map failed\n");
1005 		goto err_free;
1006 	}
1007 
1008 	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
1009 	if (err) {
1010 		mlx5_ib_dbg(dev, "copy failed\n");
1011 		goto err_unmap;
1012 	}
1013 	qp->create_type = MLX5_QP_USER;
1014 
1015 	return 0;
1016 
1017 err_unmap:
1018 	mlx5_ib_db_unmap_user(context, &qp->db);
1019 
1020 err_free:
1021 	kvfree(*in);
1022 
1023 err_umem:
1024 	ib_umem_release(ubuffer->umem);
1025 
1026 err_bfreg:
1027 	if (bfregn != MLX5_IB_INVALID_BFREG)
1028 		mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
1029 	return err;
1030 }
1031 
1032 static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1033 			    struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
1034 			    struct ib_udata *udata)
1035 {
1036 	struct mlx5_ib_ucontext *context =
1037 		rdma_udata_to_drv_context(
1038 			udata,
1039 			struct mlx5_ib_ucontext,
1040 			ibucontext);
1041 
1042 	mlx5_ib_db_unmap_user(context, &qp->db);
1043 	ib_umem_release(base->ubuffer.umem);
1044 
1045 	/*
1046 	 * Free only the BFREGs which are handled by the kernel.
1047 	 * BFREGs of UARs allocated dynamically are handled by userspace.
1048 	 */
1049 	if (qp->bfregn != MLX5_IB_INVALID_BFREG)
1050 		mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
1051 }
1052 
1053 /* get_sq_edge - Get the next nearby edge.
1054  *
1055  * An 'edge' is defined as the first address following the end of the
1056  * fragment or the SQ. Accordingly, WQE construction, which repeatedly
1057  * advances the pointer while writing the next piece of data, only needs
1058  * to check whether it has reached an edge.
1059  *
1060  * @sq - SQ buffer.
1061  * @idx - Stride index in the SQ buffer.
1062  *
1063  * Return:
1064  *	The new edge.
1065  */
1066 static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
1067 {
1068 	void *fragment_end;
1069 
1070 	fragment_end = mlx5_frag_buf_get_wqe
1071 		(&sq->fbc,
1072 		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
1073 
1074 	return fragment_end + MLX5_SEND_WQE_BB;
1075 }
1076 
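/*
 * Kernel (non-user) QP creation: pick a blue flame register (fast-path
 * for REG_UMR, write-combining for the WC test QP, the regular one
 * otherwise), size the SQ, allocate the fragmented QP buffer and doorbell
 * record, and build the create_qp_in mailbox that the caller passes to
 * firmware.  The wrid/wr_data/w_list/wqe_head arrays track posted work
 * requests in software.
 */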
1077 static int create_kernel_qp(struct mlx5_ib_dev *dev,
1078 			    struct ib_qp_init_attr *init_attr,
1079 			    struct mlx5_ib_qp *qp,
1080 			    u32 **in, int *inlen,
1081 			    struct mlx5_ib_qp_base *base)
1082 {
1083 	int uar_index;
1084 	void *qpc;
1085 	int err;
1086 
1087 	if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
1088 					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1089 					IB_QP_CREATE_IPOIB_UD_LSO |
1090 					IB_QP_CREATE_NETIF_QP |
1091 					MLX5_IB_QP_CREATE_SQPN_QP1 |
1092 					MLX5_IB_QP_CREATE_WC_TEST))
1093 		return -EINVAL;
1094 
1095 	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
1096 		qp->bf.bfreg = &dev->fp_bfreg;
1097 	else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
1098 		qp->bf.bfreg = &dev->wc_bfreg;
1099 	else
1100 		qp->bf.bfreg = &dev->bfreg;
1101 
1102 	/* We need to divide by two since each register is comprised of
1103 	 * two buffers of identical size, namely odd and even
1104 	 */
1105 	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
1106 	uar_index = qp->bf.bfreg->index;
1107 
1108 	err = calc_sq_size(dev, init_attr, qp);
1109 	if (err < 0) {
1110 		mlx5_ib_dbg(dev, "err %d\n", err);
1111 		return err;
1112 	}
1113 
1114 	qp->rq.offset = 0;
1115 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
1116 	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
1117 
1118 	err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
1119 				       &qp->buf, dev->mdev->priv.numa_node);
1120 	if (err) {
1121 		mlx5_ib_dbg(dev, "err %d\n", err);
1122 		return err;
1123 	}
1124 
1125 	if (qp->rq.wqe_cnt)
1126 		mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
1127 			      ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
1128 
1129 	if (qp->sq.wqe_cnt) {
1130 		int sq_strides_offset = (qp->sq.offset  & (PAGE_SIZE - 1)) /
1131 					MLX5_SEND_WQE_BB;
1132 		mlx5_init_fbc_offset(qp->buf.frags +
1133 				     (qp->sq.offset / PAGE_SIZE),
1134 				     ilog2(MLX5_SEND_WQE_BB),
1135 				     ilog2(qp->sq.wqe_cnt),
1136 				     sq_strides_offset, &qp->sq.fbc);
1137 
1138 		qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
1139 	}
1140 
1141 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
1142 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
1143 	*in = kvzalloc(*inlen, GFP_KERNEL);
1144 	if (!*in) {
1145 		err = -ENOMEM;
1146 		goto err_buf;
1147 	}
1148 
1149 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
1150 	MLX5_SET(qpc, qpc, uar_page, uar_index);
1151 	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1152 
1153 	/* Set "fast registration enabled" for all kernel QPs */
1154 	MLX5_SET(qpc, qpc, fre, 1);
1155 	MLX5_SET(qpc, qpc, rlky, 1);
1156 
1157 	if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
1158 		MLX5_SET(qpc, qpc, deth_sqpn, 1);
1159 		qp->flags |= MLX5_IB_QP_SQPN_QP1;
1160 	}
1161 
1162 	mlx5_fill_page_frag_array(&qp->buf,
1163 				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
1164 							 *in, pas));
1165 
1166 	err = mlx5_db_alloc(dev->mdev, &qp->db);
1167 	if (err) {
1168 		mlx5_ib_dbg(dev, "err %d\n", err);
1169 		goto err_free;
1170 	}
1171 
1172 	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
1173 				     sizeof(*qp->sq.wrid), GFP_KERNEL);
1174 	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
1175 					sizeof(*qp->sq.wr_data), GFP_KERNEL);
1176 	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
1177 				     sizeof(*qp->rq.wrid), GFP_KERNEL);
1178 	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
1179 				       sizeof(*qp->sq.w_list), GFP_KERNEL);
1180 	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
1181 					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);
1182 
1183 	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
1184 	    !qp->sq.w_list || !qp->sq.wqe_head) {
1185 		err = -ENOMEM;
1186 		goto err_wrid;
1187 	}
1188 	qp->create_type = MLX5_QP_KERNEL;
1189 
1190 	return 0;
1191 
1192 err_wrid:
1193 	kvfree(qp->sq.wqe_head);
1194 	kvfree(qp->sq.w_list);
1195 	kvfree(qp->sq.wrid);
1196 	kvfree(qp->sq.wr_data);
1197 	kvfree(qp->rq.wrid);
1198 	mlx5_db_free(dev->mdev, &qp->db);
1199 
1200 err_free:
1201 	kvfree(*in);
1202 
1203 err_buf:
1204 	mlx5_frag_buf_free(dev->mdev, &qp->buf);
1205 	return err;
1206 }
1207 
1208 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1209 {
1210 	kvfree(qp->sq.wqe_head);
1211 	kvfree(qp->sq.w_list);
1212 	kvfree(qp->sq.wrid);
1213 	kvfree(qp->sq.wr_data);
1214 	kvfree(qp->rq.wrid);
1215 	mlx5_db_free(dev->mdev, &qp->db);
1216 	mlx5_frag_buf_free(dev->mdev, &qp->buf);
1217 }
1218 
1219 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
1220 {
1221 	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
1222 	    (attr->qp_type == MLX5_IB_QPT_DCI) ||
1223 	    (attr->qp_type == IB_QPT_XRC_INI))
1224 		return MLX5_SRQ_RQ;
1225 	else if (!qp->has_rq)
1226 		return MLX5_ZERO_LEN_RQ;
1227 	else
1228 		return MLX5_NON_ZERO_RQ;
1229 }
1230 
1231 static int is_connected(enum ib_qp_type qp_type)
1232 {
1233 	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
1234 	    qp_type == MLX5_IB_QPT_DCI)
1235 		return 1;
1236 
1237 	return 0;
1238 }
1239 
1240 static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
1241 				    struct mlx5_ib_qp *qp,
1242 				    struct mlx5_ib_sq *sq, u32 tdn,
1243 				    struct ib_pd *pd)
1244 {
1245 	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
1246 	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1247 
1248 	MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
1249 	MLX5_SET(tisc, tisc, transport_domain, tdn);
1250 	if (qp->flags & MLX5_IB_QP_UNDERLAY)
1251 		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
1252 
1253 	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
1254 }
1255 
1256 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
1257 				      struct mlx5_ib_sq *sq, struct ib_pd *pd)
1258 {
1259 	mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
1260 }
1261 
1262 static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
1263 {
1264 	if (sq->flow_rule)
1265 		mlx5_del_flow_rules(sq->flow_rule);
1266 	sq->flow_rule = NULL;
1267 }
1268 
1269 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1270 				   struct ib_udata *udata,
1271 				   struct mlx5_ib_sq *sq, void *qpin,
1272 				   struct ib_pd *pd)
1273 {
1274 	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
1275 	__be64 *pas;
1276 	void *in;
1277 	void *sqc;
1278 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1279 	void *wq;
1280 	int inlen;
1281 	int err;
1282 	int page_shift = 0;
1283 	int npages;
1284 	int ncont = 0;
1285 	u32 offset = 0;
1286 
1287 	err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr, ubuffer->buf_size,
1288 			       &sq->ubuffer.umem, &npages, &page_shift, &ncont,
1289 			       &offset);
1290 	if (err)
1291 		return err;
1292 
1293 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
1294 	in = kvzalloc(inlen, GFP_KERNEL);
1295 	if (!in) {
1296 		err = -ENOMEM;
1297 		goto err_umem;
1298 	}
1299 
1300 	MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
1301 	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1302 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1303 	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
1304 		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
1305 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1306 	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
1307 	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
1308 	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1309 	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
1310 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
1311 	    MLX5_CAP_ETH(dev->mdev, swp))
1312 		MLX5_SET(sqc, sqc, allow_swp, 1);
1313 
1314 	wq = MLX5_ADDR_OF(sqc, sqc, wq);
1315 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1316 	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
1317 	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
1318 	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
1319 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1320 	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
1321 	MLX5_SET(wq, wq, log_wq_pg_sz,  page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1322 	MLX5_SET(wq, wq, page_offset, offset);
1323 
1324 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
1325 	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
1326 
1327 	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
1328 
1329 	kvfree(in);
1330 
1331 	if (err)
1332 		goto err_umem;
1333 
1334 	return 0;
1335 
1336 err_umem:
1337 	ib_umem_release(sq->ubuffer.umem);
1338 	sq->ubuffer.umem = NULL;
1339 
1340 	return err;
1341 }
1342 
1343 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1344 				     struct mlx5_ib_sq *sq)
1345 {
1346 	destroy_flow_rule_vport_sq(sq);
1347 	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
1348 	ib_umem_release(sq->ubuffer.umem);
1349 }
1350 
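/*
 * Size in bytes of the PAS (physical address) array needed for the RQ
 * described by @qpc: the RQ byte size (WQE count times stride, both taken
 * as log values from the QPC) plus the page_offset contribution
 * (page_offset is expressed in 1/64ths of a page), rounded up to whole
 * pages, with one 64-bit address per page.
 */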
1351 static size_t get_rq_pas_size(void *qpc)
1352 {
1353 	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
1354 	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
1355 	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
1356 	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
1357 	u32 po_quanta	  = 1 << (log_page_size - 6);
1358 	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
1359 	u32 page_size	  = 1 << log_page_size;
1360 	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
1361 	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
1362 
1363 	return rq_num_pas * sizeof(u64);
1364 }
1365 
1366 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1367 				   struct mlx5_ib_rq *rq, void *qpin,
1368 				   size_t qpinlen, struct ib_pd *pd)
1369 {
1370 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
1371 	__be64 *pas;
1372 	__be64 *qp_pas;
1373 	void *in;
1374 	void *rqc;
1375 	void *wq;
1376 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1377 	size_t rq_pas_size = get_rq_pas_size(qpc);
1378 	size_t inlen;
1379 	int err;
1380 
1381 	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
1382 		return -EINVAL;
1383 
1384 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
1385 	in = kvzalloc(inlen, GFP_KERNEL);
1386 	if (!in)
1387 		return -ENOMEM;
1388 
1389 	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
1390 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
1391 	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
1392 		MLX5_SET(rqc, rqc, vsd, 1);
1393 	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
1394 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
1395 	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
1396 	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
1397 	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
1398 
1399 	if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
1400 		MLX5_SET(rqc, rqc, scatter_fcs, 1);
1401 
1402 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
1403 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1404 	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
1405 		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1406 	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
1407 	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
1408 	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
1409 	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
1410 	MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
1411 	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
1412 
1413 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
1414 	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
1415 	memcpy(pas, qp_pas, rq_pas_size);
1416 
1417 	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
1418 
1419 	kvfree(in);
1420 
1421 	return err;
1422 }
1423 
1424 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1425 				     struct mlx5_ib_rq *rq)
1426 {
1427 	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
1428 }
1429 
1430 static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
1431 {
1432 	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
1433 		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
1434 		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
1435 }
1436 
1437 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1438 				      struct mlx5_ib_rq *rq,
1439 				      u32 qp_flags_en,
1440 				      struct ib_pd *pd)
1441 {
1442 	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1443 			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
1444 		mlx5_ib_disable_lb(dev, false, true);
1445 	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
1446 }
1447 
1448 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1449 				    struct mlx5_ib_rq *rq, u32 tdn,
1450 				    u32 *qp_flags_en,
1451 				    struct ib_pd *pd,
1452 				    u32 *out, int outlen)
1453 {
1454 	u8 lb_flag = 0;
1455 	u32 *in;
1456 	void *tirc;
1457 	int inlen;
1458 	int err;
1459 
1460 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1461 	in = kvzalloc(inlen, GFP_KERNEL);
1462 	if (!in)
1463 		return -ENOMEM;
1464 
1465 	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
1466 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1467 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
1468 	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
1469 	MLX5_SET(tirc, tirc, transport_domain, tdn);
1470 	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1471 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
1472 
1473 	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
1474 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1475 
1476 	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
1477 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
1478 
1479 	if (dev->is_rep) {
1480 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1481 		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1482 	}
1483 
1484 	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
1485 
1486 	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
1487 
1488 	rq->tirn = MLX5_GET(create_tir_out, out, tirn);
1489 	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
1490 		err = mlx5_ib_enable_lb(dev, false, true);
1491 
1492 		if (err)
1493 			destroy_raw_packet_qp_tir(dev, rq, 0, pd);
1494 	}
1495 	kvfree(in);
1496 
1497 	return err;
1498 }
1499 
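/*
 * A raw packet QP is modelled as separate send and receive objects: the
 * send side is a TIS + SQ pair and the receive side an RQ + TIR pair,
 * each created only if the corresponding work queue has WQEs.  The QP
 * number reported to the core is the SQ number (or the RQ number when
 * there is no SQ), and the object numbers are copied into the response
 * when the PD carries a user-context uid.
 */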
1500 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1501 				u32 *in, size_t inlen,
1502 				struct ib_pd *pd,
1503 				struct ib_udata *udata,
1504 				struct mlx5_ib_create_qp_resp *resp)
1505 {
1506 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1507 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1508 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1509 	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
1510 		udata, struct mlx5_ib_ucontext, ibucontext);
1511 	int err;
1512 	u32 tdn = mucontext->tdn;
1513 	u16 uid = to_mpd(pd)->uid;
1514 	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
1515 
1516 	if (qp->sq.wqe_cnt) {
1517 		err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
1518 		if (err)
1519 			return err;
1520 
1521 		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd);
1522 		if (err)
1523 			goto err_destroy_tis;
1524 
1525 		if (uid) {
1526 			resp->tisn = sq->tisn;
1527 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
1528 			resp->sqn = sq->base.mqp.qpn;
1529 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
1530 		}
1531 
1532 		sq->base.container_mibqp = qp;
1533 		sq->base.mqp.event = mlx5_ib_qp_event;
1534 	}
1535 
1536 	if (qp->rq.wqe_cnt) {
1537 		rq->base.container_mibqp = qp;
1538 
1539 		if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
1540 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
1541 		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
1542 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
1543 		err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
1544 		if (err)
1545 			goto err_destroy_sq;
1546 
1547 		err = create_raw_packet_qp_tir(
1548 			dev, rq, tdn, &qp->flags_en, pd, out,
1549 			MLX5_ST_SZ_BYTES(create_tir_out));
1550 		if (err)
1551 			goto err_destroy_rq;
1552 
1553 		if (uid) {
1554 			resp->rqn = rq->base.mqp.qpn;
1555 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
1556 			resp->tirn = rq->tirn;
1557 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1558 			if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
1559 				resp->tir_icm_addr = MLX5_GET(
1560 					create_tir_out, out, icm_address_31_0);
1561 				resp->tir_icm_addr |=
1562 					(u64)MLX5_GET(create_tir_out, out,
1563 						      icm_address_39_32)
1564 					<< 32;
1565 				resp->tir_icm_addr |=
1566 					(u64)MLX5_GET(create_tir_out, out,
1567 						      icm_address_63_40)
1568 					<< 40;
1569 				resp->comp_mask |=
1570 					MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
1571 			}
1572 		}
1573 	}
1574 
1575 	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
1576 						     rq->base.mqp.qpn;
1577 	err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
1578 	if (err)
1579 		goto err_destroy_tir;
1580 
1581 	return 0;
1582 
1583 err_destroy_tir:
1584 	destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
1585 err_destroy_rq:
1586 	destroy_raw_packet_qp_rq(dev, rq);
1587 err_destroy_sq:
1588 	if (!qp->sq.wqe_cnt)
1589 		return err;
1590 	destroy_raw_packet_qp_sq(dev, sq);
1591 err_destroy_tis:
1592 	destroy_raw_packet_qp_tis(dev, sq, pd);
1593 
1594 	return err;
1595 }
1596 
1597 static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
1598 				  struct mlx5_ib_qp *qp)
1599 {
1600 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1601 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1602 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1603 
1604 	if (qp->rq.wqe_cnt) {
1605 		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
1606 		destroy_raw_packet_qp_rq(dev, rq);
1607 	}
1608 
1609 	if (qp->sq.wqe_cnt) {
1610 		destroy_raw_packet_qp_sq(dev, sq);
1611 		destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
1612 	}
1613 }
1614 
1615 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
1616 				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
1617 {
1618 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1619 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1620 
1621 	sq->sq = &qp->sq;
1622 	rq->rq = &qp->rq;
1623 	sq->doorbell = &qp->db;
1624 	rq->doorbell = &qp->db;
1625 }
1626 
1627 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1628 {
1629 	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1630 			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
1631 		mlx5_ib_disable_lb(dev, false, true);
1632 	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1633 			     to_mpd(qp->ibqp.pd)->uid);
1634 }
1635 
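/*
 * An RSS raw packet QP has no send or receive queues of its own; it is
 * implemented purely as an indirect TIR that spreads traffic across the
 * receive WQs of the user-supplied indirection table.  The function
 * validates the user command, programs the hash function, hash key and
 * selected fields (and self-loopback blocking), and creates the TIR.
 */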
1636 static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1637 				 struct ib_pd *pd,
1638 				 struct ib_qp_init_attr *init_attr,
1639 				 struct ib_udata *udata)
1640 {
1641 	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
1642 		udata, struct mlx5_ib_ucontext, ibucontext);
1643 	struct mlx5_ib_create_qp_resp resp = {};
1644 	int inlen;
1645 	int outlen;
1646 	int err;
1647 	u32 *in;
1648 	u32 *out;
1649 	void *tirc;
1650 	void *hfso;
1651 	u32 selected_fields = 0;
1652 	u32 outer_l4;
1653 	size_t min_resp_len;
1654 	u32 tdn = mucontext->tdn;
1655 	struct mlx5_ib_create_qp_rss ucmd = {};
1656 	size_t required_cmd_sz;
1657 	u8 lb_flag = 0;
1658 
1659 	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
1660 		return -EOPNOTSUPP;
1661 
1662 	if (init_attr->create_flags || init_attr->send_cq)
1663 		return -EINVAL;
1664 
1665 	min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
1666 	if (udata->outlen < min_resp_len)
1667 		return -EINVAL;
1668 
1669 	required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
1670 	if (udata->inlen < required_cmd_sz) {
1671 		mlx5_ib_dbg(dev, "invalid inlen\n");
1672 		return -EINVAL;
1673 	}
1674 
1675 	if (udata->inlen > sizeof(ucmd) &&
1676 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
1677 				 udata->inlen - sizeof(ucmd))) {
1678 		mlx5_ib_dbg(dev, "inlen is not supported\n");
1679 		return -EOPNOTSUPP;
1680 	}
1681 
1682 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
1683 		mlx5_ib_dbg(dev, "copy failed\n");
1684 		return -EFAULT;
1685 	}
1686 
1687 	if (ucmd.comp_mask) {
1688 		mlx5_ib_dbg(dev, "invalid comp mask\n");
1689 		return -EOPNOTSUPP;
1690 	}
1691 
1692 	if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
1693 			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1694 			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
1695 		mlx5_ib_dbg(dev, "invalid flags\n");
1696 		return -EOPNOTSUPP;
1697 	}
1698 
1699 	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
1700 	    !tunnel_offload_supported(dev->mdev)) {
1701 		mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
1702 		return -EOPNOTSUPP;
1703 	}
1704 
1705 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
1706 	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
1707 		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
1708 		return -EOPNOTSUPP;
1709 	}
1710 
1711 	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
1712 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1713 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1714 	}
1715 
1716 	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
1717 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
1718 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
1719 	}
1720 
1721 	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
1722 	if (err) {
1723 		mlx5_ib_dbg(dev, "copy failed\n");
1724 		return -EINVAL;
1725 	}
1726 
1727 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1728 	outlen = MLX5_ST_SZ_BYTES(create_tir_out);
1729 	in = kvzalloc(inlen + outlen, GFP_KERNEL);
1730 	if (!in)
1731 		return -ENOMEM;
1732 
1733 	out = in + MLX5_ST_SZ_DW(create_tir_in);
1734 	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
1735 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1736 	MLX5_SET(tirc, tirc, disp_type,
1737 		 MLX5_TIRC_DISP_TYPE_INDIRECT);
1738 	MLX5_SET(tirc, tirc, indirect_table,
1739 		 init_attr->rwq_ind_tbl->ind_tbl_num);
1740 	MLX5_SET(tirc, tirc, transport_domain, tdn);
1741 
1742 	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1743 
1744 	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1745 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
1746 
1747 	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
1748 
1749 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
1750 		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
1751 	else
1752 		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1753 
1754 	switch (ucmd.rx_hash_function) {
1755 	case MLX5_RX_HASH_FUNC_TOEPLITZ:
1756 	{
1757 		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
1758 		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
1759 
1760 		if (len != ucmd.rx_key_len) {
1761 			err = -EINVAL;
1762 			goto err;
1763 		}
1764 
1765 		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
1766 		memcpy(rss_key, ucmd.rx_hash_key, len);
1767 		break;
1768 	}
1769 	default:
1770 		err = -EOPNOTSUPP;
1771 		goto err;
1772 	}
1773 
1774 	if (!ucmd.rx_hash_fields_mask) {
1775 		/* special case when this TIR serves as a steering entry without hashing */
1776 		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
1777 			goto create_tir;
1778 		err = -EINVAL;
1779 		goto err;
1780 	}
1781 
1782 	if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1783 	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
1784 	     ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1785 	     (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
1786 		err = -EINVAL;
1787 		goto err;
1788 	}
1789 
1790 	/* If no IPv4/IPv6 SRC or DST field was requested, this selector is ignored */
1791 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1792 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
1793 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1794 			 MLX5_L3_PROT_TYPE_IPV4);
1795 	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1796 		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1797 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1798 			 MLX5_L3_PROT_TYPE_IPV6);
1799 
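	/*
	 * Build a small bitmap of the requested outer L4 hash inputs:
	 * bit 0 - TCP ports, bit 1 - UDP ports, bit 2 - IPsec SPI.
	 * The power-of-two test below rejects any combination, since the
	 * TIR can hash on only one L4 protocol at a time.
	 */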
1800 	outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1801 		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
1802 		   ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1803 		    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
1804 		   !!(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
1805 
1806 	/* Check that only one l4 protocol is set */
1807 	if (outer_l4 & (outer_l4 - 1)) {
1808 		err = -EINVAL;
1809 		goto err;
1810 	}
1811 
1812 	/* If no TCP/UDP SRC or DST port was requested, this selector is ignored */
1813 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1814 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1815 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1816 			 MLX5_L4_PROT_TYPE_TCP);
1817 	else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1818 		 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1819 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1820 			 MLX5_L4_PROT_TYPE_UDP);
1821 
1822 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1823 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
1824 		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
1825 
1826 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
1827 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1828 		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
1829 
1830 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1831 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
1832 		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
1833 
1834 	if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
1835 	    (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1836 		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
1837 
1838 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
1839 		selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
1840 
1841 	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
1842 
1843 create_tir:
1844 	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
1845 
1846 	qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
1847 	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
1848 		err = mlx5_ib_enable_lb(dev, false, true);
1849 
1850 		if (err)
1851 			mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1852 					     to_mpd(pd)->uid);
1853 	}
1854 
1855 	if (err)
1856 		goto err;
1857 
1858 	if (mucontext->devx_uid) {
1859 		resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1860 		resp.tirn = qp->rss_qp.tirn;
1861 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
1862 			resp.tir_icm_addr =
1863 				MLX5_GET(create_tir_out, out, icm_address_31_0);
1864 			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
1865 							   icm_address_39_32)
1866 					     << 32;
1867 			resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
1868 							   icm_address_63_40)
1869 					     << 40;
1870 			resp.comp_mask |=
1871 				MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
1872 		}
1873 	}
1874 
1875 	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
1876 	if (err)
1877 		goto err_copy;
1878 
1879 	kvfree(in);
1880 	/* QPN 0 is reserved; an RSS QP is represented by its TIR and has no QPN of its own */
1881 	qp->trans_qp.base.mqp.qpn = 0;
1882 	qp->flags |= MLX5_IB_QP_RSS;
1883 	return 0;
1884 
1885 err_copy:
1886 	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
1887 err:
1888 	kvfree(in);
1889 	return err;
1890 }
1891 
1892 static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
1893 					 void *qpc)
1894 {
1895 	int rcqe_sz;
1896 
1897 	if (init_attr->qp_type == MLX5_IB_QPT_DCI)
1898 		return;
1899 
1900 	rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
1901 
1902 	if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
1903 		if (rcqe_sz == 128)
1904 			MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
1905 
1906 		return;
1907 	}
1908 
1909 	MLX5_SET(qpc, qpc, cs_res,
1910 		 rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
1911 				  MLX5_RES_SCAT_DATA32_CQE);
1912 }
1913 
1914 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
1915 					 struct ib_qp_init_attr *init_attr,
1916 					 struct mlx5_ib_create_qp *ucmd,
1917 					 void *qpc)
1918 {
1919 	enum ib_qp_type qpt = init_attr->qp_type;
1920 	int scqe_sz;
1921 	bool allow_scat_cqe = false;
1922 
1923 	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
1924 		return;
1925 
1926 	if (ucmd)
1927 		allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
1928 
1929 	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
1930 		return;
1931 
1932 	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
1933 	if (scqe_sz == 128) {
1934 		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
1935 		return;
1936 	}
1937 
1938 	if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
1939 	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
1940 		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
1941 }
1942 
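/*
 * Translate the firmware-reported atomic size bitmask into an atomic mode.
 * The mode follows log2 of the largest supported size, e.g. a mask with
 * bit 8 set (256B atomics) yields mode 8, while masks covering only sizes
 * up to 8B collapse to MLX5_ATOMIC_MODE_8B.
 */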
1943 static int atomic_size_to_mode(int size_mask)
1944 {
1945 	/* driver does not support atomic_size > 256B
1946 	 * and does not know how to translate bigger sizes
1947 	 */
1948 	int supported_size_mask = size_mask & 0x1ff;
1949 	int log_max_size;
1950 
1951 	if (!supported_size_mask)
1952 		return -EOPNOTSUPP;
1953 
1954 	log_max_size = __fls(supported_size_mask);
1955 
1956 	if (log_max_size > 3)
1957 		return log_max_size;
1958 
1959 	return MLX5_ATOMIC_MODE_8B;
1960 }
1961 
1962 static int get_atomic_mode(struct mlx5_ib_dev *dev,
1963 			   enum ib_qp_type qp_type)
1964 {
1965 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
1966 	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
1967 	int atomic_mode = -EOPNOTSUPP;
1968 	int atomic_size_mask;
1969 
1970 	if (!atomic)
1971 		return -EOPNOTSUPP;
1972 
1973 	if (qp_type == MLX5_IB_QPT_DCT)
1974 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
1975 	else
1976 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
1977 
1978 	if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
1979 	    (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
1980 		atomic_mode = atomic_size_to_mode(atomic_size_mask);
1981 
1982 	if (atomic_mode <= 0 &&
1983 	    (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
1984 	     atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
1985 		atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
1986 
1987 	return atomic_mode;
1988 }
1989 
1990 static inline bool check_flags_mask(uint64_t input, uint64_t supported)
1991 {
1992 	return (input & ~supported) == 0;
1993 }
1994 
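/*
 * Common QP creation path for both kernel and user QPs.  It validates the
 * create flags and the user command, sizes the work queues, builds the
 * CREATE_QP (or raw packet SQ/RQ) context, and finally links the new QP
 * into the device and CQ lists used by the reset flow.
 */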
1995 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1996 			    struct ib_qp_init_attr *init_attr,
1997 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
1998 {
1999 	struct mlx5_ib_resources *devr = &dev->devr;
2000 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
2001 	struct mlx5_core_dev *mdev = dev->mdev;
2002 	struct mlx5_ib_create_qp_resp resp = {};
2003 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2004 		udata, struct mlx5_ib_ucontext, ibucontext);
2005 	struct mlx5_ib_cq *send_cq;
2006 	struct mlx5_ib_cq *recv_cq;
2007 	unsigned long flags;
2008 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
2009 	struct mlx5_ib_create_qp ucmd;
2010 	struct mlx5_ib_qp_base *base;
2011 	int mlx5_st;
2012 	void *qpc;
2013 	u32 *in;
2014 	int err;
2015 
2016 	mutex_init(&qp->mutex);
2017 	spin_lock_init(&qp->sq.lock);
2018 	spin_lock_init(&qp->rq.lock);
2019 
2020 	mlx5_st = to_mlx5_st(init_attr->qp_type);
2021 	if (mlx5_st < 0)
2022 		return -EINVAL;
2023 
2024 	if (init_attr->rwq_ind_tbl) {
2025 		if (!udata)
2026 			return -ENOSYS;
2027 
2028 		err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
2029 		return err;
2030 	}
2031 
2032 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
2033 		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
2034 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
2035 			return -EINVAL;
2036 		} else {
2037 			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
2038 		}
2039 	}
2040 
2041 	if (init_attr->create_flags &
2042 			(IB_QP_CREATE_CROSS_CHANNEL |
2043 			 IB_QP_CREATE_MANAGED_SEND |
2044 			 IB_QP_CREATE_MANAGED_RECV)) {
2045 		if (!MLX5_CAP_GEN(mdev, cd)) {
2046 			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
2047 			return -EINVAL;
2048 		}
2049 		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
2050 			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
2051 		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
2052 			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
2053 		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
2054 			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
2055 	}
2056 
2057 	if (init_attr->qp_type == IB_QPT_UD &&
2058 	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
2059 		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
2060 			mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
2061 			return -EOPNOTSUPP;
2062 		}
2063 
2064 	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
2065 		if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2066 			mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs\n");
2067 			return -EOPNOTSUPP;
2068 		}
2069 		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
2070 		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
2071 			mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
2072 			return -EOPNOTSUPP;
2073 		}
2074 		qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
2075 	}
2076 
2077 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2078 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2079 
2080 	if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
2081 		if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
2082 		      MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
2083 		    (init_attr->qp_type != IB_QPT_RAW_PACKET))
2084 			return -EOPNOTSUPP;
2085 		qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
2086 	}
2087 
2088 	if (udata) {
2089 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
2090 			mlx5_ib_dbg(dev, "copy failed\n");
2091 			return -EFAULT;
2092 		}
2093 
2094 		if (!check_flags_mask(ucmd.flags,
2095 				      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
2096 				      MLX5_QP_FLAG_BFREG_INDEX |
2097 				      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
2098 				      MLX5_QP_FLAG_SCATTER_CQE |
2099 				      MLX5_QP_FLAG_SIGNATURE |
2100 				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
2101 				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
2102 				      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
2103 				      MLX5_QP_FLAG_TYPE_DCI |
2104 				      MLX5_QP_FLAG_TYPE_DCT))
2105 			return -EINVAL;
2106 
2107 		err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);
2108 		if (err)
2109 			return err;
2110 
2111 		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
2112 		if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
2113 			qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
2114 		if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
2115 			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
2116 			    !tunnel_offload_supported(mdev)) {
2117 				mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
2118 				return -EOPNOTSUPP;
2119 			}
2120 			qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
2121 		}
2122 
2123 		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
2124 			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2125 				mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
2126 				return -EOPNOTSUPP;
2127 			}
2128 			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
2129 		}
2130 
2131 		if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
2132 			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2133 				mlx5_ib_dbg(dev, "Self-LB MC isn't supported\n");
2134 				return -EOPNOTSUPP;
2135 			}
2136 			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
2137 		}
2138 
2139 		if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
2140 			if (init_attr->qp_type != IB_QPT_RC ||
2141 				!MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
2142 				mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
2143 				return -EOPNOTSUPP;
2144 			}
2145 			qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
2146 		}
2147 
2148 		if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
2149 			if (init_attr->qp_type != IB_QPT_UD ||
2150 			    (MLX5_CAP_GEN(dev->mdev, port_type) !=
2151 			     MLX5_CAP_PORT_TYPE_IB) ||
2152 			    !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
2153 				mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
2154 				return -EOPNOTSUPP;
2155 			}
2156 
2157 			qp->flags |= MLX5_IB_QP_UNDERLAY;
2158 			qp->underlay_qpn = init_attr->source_qpn;
2159 		}
2160 	} else {
2161 		qp->wq_sig = !!wq_signature;
2162 	}
2163 
2164 	base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
2165 		qp->flags & MLX5_IB_QP_UNDERLAY) ?
2166 	       &qp->raw_packet_qp.rq.base :
2167 	       &qp->trans_qp.base;
2168 
2169 	qp->has_rq = qp_has_rq(init_attr);
2170 	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
2171 			  qp, udata ? &ucmd : NULL);
2172 	if (err) {
2173 		mlx5_ib_dbg(dev, "err %d\n", err);
2174 		return err;
2175 	}
2176 
2177 	if (pd) {
2178 		if (udata) {
2179 			__u32 max_wqes =
2180 				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
2181 			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
2182 			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
2183 			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
2184 				mlx5_ib_dbg(dev, "invalid rq params\n");
2185 				return -EINVAL;
2186 			}
2187 			if (ucmd.sq_wqe_count > max_wqes) {
2188 				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
2189 					    ucmd.sq_wqe_count, max_wqes);
2190 				return -EINVAL;
2191 			}
2192 			if (init_attr->create_flags &
2193 			    MLX5_IB_QP_CREATE_SQPN_QP1) {
2194 				mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
2195 				return -EINVAL;
2196 			}
2197 			err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
2198 					     &resp, &inlen, base);
2199 			if (err)
2200 				mlx5_ib_dbg(dev, "err %d\n", err);
2201 		} else {
2202 			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
2203 					       base);
2204 			if (err)
2205 				mlx5_ib_dbg(dev, "err %d\n", err);
2206 		}
2207 
2208 		if (err)
2209 			return err;
2210 	} else {
2211 		in = kvzalloc(inlen, GFP_KERNEL);
2212 		if (!in)
2213 			return -ENOMEM;
2214 
2215 		qp->create_type = MLX5_QP_EMPTY;
2216 	}
2217 
2218 	if (is_sqp(init_attr->qp_type))
2219 		qp->port = init_attr->port_num;
2220 
2221 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2222 
2223 	MLX5_SET(qpc, qpc, st, mlx5_st);
2224 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2225 
2226 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
2227 		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
2228 	else
2229 		MLX5_SET(qpc, qpc, latency_sensitive, 1);
2230 
2232 	if (qp->wq_sig)
2233 		MLX5_SET(qpc, qpc, wq_signature, 1);
2234 
2235 	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
2236 		MLX5_SET(qpc, qpc, block_lb_mc, 1);
2237 
2238 	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
2239 		MLX5_SET(qpc, qpc, cd_master, 1);
2240 	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
2241 		MLX5_SET(qpc, qpc, cd_slave_send, 1);
2242 	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
2243 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
2244 	if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
2245 		MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
2246 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
2247 		configure_responder_scat_cqe(init_attr, qpc);
2248 		configure_requester_scat_cqe(dev, init_attr,
2249 					     udata ? &ucmd : NULL,
2250 					     qpc);
2251 	}
2252 
2253 	if (qp->rq.wqe_cnt) {
2254 		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2255 		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2256 	}
2257 
2258 	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
2259 
2260 	if (qp->sq.wqe_cnt) {
2261 		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2262 	} else {
2263 		MLX5_SET(qpc, qpc, no_sq, 1);
2264 		if (init_attr->srq &&
2265 		    init_attr->srq->srq_type == IB_SRQT_TM)
2266 			MLX5_SET(qpc, qpc, offload_type,
2267 				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
2268 	}
2269 
2270 	/* Set default resources */
2271 	switch (init_attr->qp_type) {
2272 	case IB_QPT_XRC_TGT:
2273 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
2274 		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
2275 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
2276 		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
2277 		break;
2278 	case IB_QPT_XRC_INI:
2279 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
2280 		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
2281 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
2282 		break;
2283 	default:
2284 		if (init_attr->srq) {
2285 			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
2286 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
2287 		} else {
2288 			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
2289 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
2290 		}
2291 	}
2292 
2293 	if (init_attr->send_cq)
2294 		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
2295 
2296 	if (init_attr->recv_cq)
2297 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
2298 
2299 	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2300 
2301 	/* the default user_index of 0xffffff asks to work with CQE version 0 */
2302 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2303 		MLX5_SET(qpc, qpc, user_index, uidx);
2304 
2305 	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */
2306 	if (init_attr->qp_type == IB_QPT_UD &&
2307 	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
2308 		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
2309 		qp->flags |= MLX5_IB_QP_LSO;
2310 	}
2311 
2312 	if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
2313 		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
2314 			mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
2315 			err = -EOPNOTSUPP;
2316 			goto err;
2317 		} else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2318 			MLX5_SET(qpc, qpc, end_padding_mode,
2319 				 MLX5_WQ_END_PAD_MODE_ALIGN);
2320 		} else {
2321 			qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
2322 		}
2323 	}
2324 
2325 	if (inlen < 0) {
2326 		err = -EINVAL;
2327 		goto err;
2328 	}
2329 
2330 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
2331 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
2332 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
2333 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
2334 		err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
2335 					   &resp);
2336 	} else {
2337 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
2338 	}
2339 
2340 	if (err) {
2341 		mlx5_ib_dbg(dev, "create qp failed\n");
2342 		goto err_create;
2343 	}
2344 
2345 	kvfree(in);
2346 
2347 	base->container_mibqp = qp;
2348 	base->mqp.event = mlx5_ib_qp_event;
2349 
2350 	get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
2351 		&send_cq, &recv_cq);
2352 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2353 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2354 	/* Maintain device-to-QP access; needed for further handling in the
2355 	 * reset flow.
2356 	 */
2357 	list_add_tail(&qp->qps_list, &dev->qp_list);
2358 	/* Maintain CQ-to-QP access; also needed for the reset flow.
2359 	 */
2360 	if (send_cq)
2361 		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2362 	if (recv_cq)
2363 		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2364 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2365 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2366 
2367 	return 0;
2368 
2369 err_create:
2370 	if (qp->create_type == MLX5_QP_USER)
2371 		destroy_qp_user(dev, pd, qp, base, udata);
2372 	else if (qp->create_type == MLX5_QP_KERNEL)
2373 		destroy_qp_kernel(dev, qp);
2374 
2375 err:
2376 	kvfree(in);
2377 	return err;
2378 }
2379 
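/*
 * Take both CQ locks in a stable order (lower CQN first) so that two
 * threads locking the same pair of CQs can never deadlock; the __acquire()
 * calls only keep sparse's lock annotations balanced when a lock is
 * skipped or shared.
 */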
2380 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2381 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
2382 {
2383 	if (send_cq) {
2384 		if (recv_cq) {
2385 			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2386 				spin_lock(&send_cq->lock);
2387 				spin_lock_nested(&recv_cq->lock,
2388 						 SINGLE_DEPTH_NESTING);
2389 			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2390 				spin_lock(&send_cq->lock);
2391 				__acquire(&recv_cq->lock);
2392 			} else {
2393 				spin_lock(&recv_cq->lock);
2394 				spin_lock_nested(&send_cq->lock,
2395 						 SINGLE_DEPTH_NESTING);
2396 			}
2397 		} else {
2398 			spin_lock(&send_cq->lock);
2399 			__acquire(&recv_cq->lock);
2400 		}
2401 	} else if (recv_cq) {
2402 		spin_lock(&recv_cq->lock);
2403 		__acquire(&send_cq->lock);
2404 	} else {
2405 		__acquire(&send_cq->lock);
2406 		__acquire(&recv_cq->lock);
2407 	}
2408 }
2409 
2410 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2411 	__releases(&send_cq->lock) __releases(&recv_cq->lock)
2412 {
2413 	if (send_cq) {
2414 		if (recv_cq) {
2415 			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2416 				spin_unlock(&recv_cq->lock);
2417 				spin_unlock(&send_cq->lock);
2418 			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2419 				__release(&recv_cq->lock);
2420 				spin_unlock(&send_cq->lock);
2421 			} else {
2422 				spin_unlock(&send_cq->lock);
2423 				spin_unlock(&recv_cq->lock);
2424 			}
2425 		} else {
2426 			__release(&recv_cq->lock);
2427 			spin_unlock(&send_cq->lock);
2428 		}
2429 	} else if (recv_cq) {
2430 		__release(&send_cq->lock);
2431 		spin_unlock(&recv_cq->lock);
2432 	} else {
2433 		__release(&recv_cq->lock);
2434 		__release(&send_cq->lock);
2435 	}
2436 }
2437 
2438 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
2439 {
2440 	return to_mpd(qp->ibqp.pd);
2441 }
2442 
2443 static void get_cqs(enum ib_qp_type qp_type,
2444 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
2445 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
2446 {
2447 	switch (qp_type) {
2448 	case IB_QPT_XRC_TGT:
2449 		*send_cq = NULL;
2450 		*recv_cq = NULL;
2451 		break;
2452 	case MLX5_IB_QPT_REG_UMR:
2453 	case IB_QPT_XRC_INI:
2454 		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2455 		*recv_cq = NULL;
2456 		break;
2457 
2458 	case IB_QPT_SMI:
2459 	case MLX5_IB_QPT_HW_GSI:
2460 	case IB_QPT_RC:
2461 	case IB_QPT_UC:
2462 	case IB_QPT_UD:
2463 	case IB_QPT_RAW_IPV6:
2464 	case IB_QPT_RAW_ETHERTYPE:
2465 	case IB_QPT_RAW_PACKET:
2466 		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2467 		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
2468 		break;
2469 
2470 	case IB_QPT_MAX:
2471 	default:
2472 		*send_cq = NULL;
2473 		*recv_cq = NULL;
2474 		break;
2475 	}
2476 }
2477 
2478 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2479 				const struct mlx5_modify_raw_qp_param *raw_qp_param,
2480 				u8 lag_tx_affinity);
2481 
2482 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2483 			      struct ib_udata *udata)
2484 {
2485 	struct mlx5_ib_cq *send_cq, *recv_cq;
2486 	struct mlx5_ib_qp_base *base;
2487 	unsigned long flags;
2488 	int err;
2489 
2490 	if (qp->ibqp.rwq_ind_tbl) {
2491 		destroy_rss_raw_qp_tir(dev, qp);
2492 		return;
2493 	}
2494 
2495 	base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
2496 		qp->flags & MLX5_IB_QP_UNDERLAY) ?
2497 	       &qp->raw_packet_qp.rq.base :
2498 	       &qp->trans_qp.base;
2499 
2500 	if (qp->state != IB_QPS_RESET) {
2501 		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
2502 		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
2503 			err = mlx5_core_qp_modify(dev->mdev,
2504 						  MLX5_CMD_OP_2RST_QP, 0,
2505 						  NULL, &base->mqp);
2506 		} else {
2507 			struct mlx5_modify_raw_qp_param raw_qp_param = {
2508 				.operation = MLX5_CMD_OP_2RST_QP
2509 			};
2510 
2511 			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
2512 		}
2513 		if (err)
2514 			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
2515 				     base->mqp.qpn);
2516 	}
2517 
2518 	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
2519 		&send_cq, &recv_cq);
2520 
2521 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2522 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2523 	/* del from lists under both locks above to protect reset flow paths */
2524 	list_del(&qp->qps_list);
2525 	if (send_cq)
2526 		list_del(&qp->cq_send_list);
2527 
2528 	if (recv_cq)
2529 		list_del(&qp->cq_recv_list);
2530 
2531 	if (qp->create_type == MLX5_QP_KERNEL) {
2532 		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
2533 				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
2534 		if (send_cq != recv_cq)
2535 			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
2536 					   NULL);
2537 	}
2538 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2539 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2540 
2541 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
2542 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
2543 		destroy_raw_packet_qp(dev, qp);
2544 	} else {
2545 		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
2546 		if (err)
2547 			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
2548 				     base->mqp.qpn);
2549 	}
2550 
2551 	if (qp->create_type == MLX5_QP_KERNEL)
2552 		destroy_qp_kernel(dev, qp);
2553 	else if (qp->create_type == MLX5_QP_USER)
2554 		destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
2555 }
2556 
2557 static const char *ib_qp_type_str(enum ib_qp_type type)
2558 {
2559 	switch (type) {
2560 	case IB_QPT_SMI:
2561 		return "IB_QPT_SMI";
2562 	case IB_QPT_GSI:
2563 		return "IB_QPT_GSI";
2564 	case IB_QPT_RC:
2565 		return "IB_QPT_RC";
2566 	case IB_QPT_UC:
2567 		return "IB_QPT_UC";
2568 	case IB_QPT_UD:
2569 		return "IB_QPT_UD";
2570 	case IB_QPT_RAW_IPV6:
2571 		return "IB_QPT_RAW_IPV6";
2572 	case IB_QPT_RAW_ETHERTYPE:
2573 		return "IB_QPT_RAW_ETHERTYPE";
2574 	case IB_QPT_XRC_INI:
2575 		return "IB_QPT_XRC_INI";
2576 	case IB_QPT_XRC_TGT:
2577 		return "IB_QPT_XRC_TGT";
2578 	case IB_QPT_RAW_PACKET:
2579 		return "IB_QPT_RAW_PACKET";
2580 	case MLX5_IB_QPT_REG_UMR:
2581 		return "MLX5_IB_QPT_REG_UMR";
2582 	case IB_QPT_DRIVER:
2583 		return "IB_QPT_DRIVER";
2584 	case IB_QPT_MAX:
2585 	default:
2586 		return "Invalid QP type";
2587 	}
2588 }
2589 
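/*
 * DCT creation is split in two: here only the DCT context is allocated and
 * pre-filled, and the QP is left in RESET.  The CREATE_DCT command itself
 * is issued later, from the modify path, once the remaining attributes are
 * known (the destroy path above only tears the DCT down if it reached RTR).
 */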
2590 static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
2591 					struct ib_qp_init_attr *attr,
2592 					struct mlx5_ib_create_qp *ucmd,
2593 					struct ib_udata *udata)
2594 {
2595 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2596 		udata, struct mlx5_ib_ucontext, ibucontext);
2597 	struct mlx5_ib_qp *qp;
2598 	int err = 0;
2599 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
2600 	void *dctc;
2601 
2602 	if (!attr->srq || !attr->recv_cq)
2603 		return ERR_PTR(-EINVAL);
2604 
2605 	err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
2606 	if (err)
2607 		return ERR_PTR(err);
2608 
2609 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2610 	if (!qp)
2611 		return ERR_PTR(-ENOMEM);
2612 
2613 	qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
2614 	if (!qp->dct.in) {
2615 		err = -ENOMEM;
2616 		goto err_free;
2617 	}
2618 
2619 	MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
2620 	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
2621 	qp->qp_sub_type = MLX5_IB_QPT_DCT;
2622 	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
2623 	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
2624 	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
2625 	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
2626 	MLX5_SET(dctc, dctc, user_index, uidx);
2627 
2628 	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
2629 		configure_responder_scat_cqe(attr, dctc);
2630 
2631 	qp->state = IB_QPS_RESET;
2632 
2633 	return &qp->ibqp;
2634 err_free:
2635 	kfree(qp);
2636 	return ERR_PTR(err);
2637 }
2638 
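/*
 * IB_QPT_DRIVER QPs are disambiguated by the user command flags: exactly
 * one of MLX5_QP_FLAG_TYPE_DCI or MLX5_QP_FLAG_TYPE_DCT must be set, and
 * the device must support DC transport.
 */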
2639 static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
2640 			   struct ib_qp_init_attr *init_attr,
2641 			   struct mlx5_ib_create_qp *ucmd,
2642 			   struct ib_udata *udata)
2643 {
2644 	enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
2645 	int err;
2646 
2647 	if (!udata)
2648 		return -EINVAL;
2649 
2650 	if (udata->inlen < sizeof(*ucmd)) {
2651 		mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
2652 		return -EINVAL;
2653 	}
2654 	err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
2655 	if (err)
2656 		return err;
2657 
2658 	if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
2659 		init_attr->qp_type = MLX5_IB_QPT_DCI;
2660 	} else {
2661 		if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
2662 			init_attr->qp_type = MLX5_IB_QPT_DCT;
2663 		} else {
2664 			mlx5_ib_dbg(dev, "Invalid QP flags\n");
2665 			return -EINVAL;
2666 		}
2667 	}
2668 
2669 	if (!MLX5_CAP_GEN(dev->mdev, dct)) {
2670 		mlx5_ib_dbg(dev, "DC transport is not supported\n");
2671 		return -EOPNOTSUPP;
2672 	}
2673 
2674 	return 0;
2675 }
2676 
2677 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
2678 				struct ib_qp_init_attr *verbs_init_attr,
2679 				struct ib_udata *udata)
2680 {
2681 	struct mlx5_ib_dev *dev;
2682 	struct mlx5_ib_qp *qp;
2683 	u16 xrcdn = 0;
2684 	int err;
2685 	struct ib_qp_init_attr mlx_init_attr;
2686 	struct ib_qp_init_attr *init_attr = verbs_init_attr;
2687 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2688 		udata, struct mlx5_ib_ucontext, ibucontext);
2689 
2690 	if (pd) {
2691 		dev = to_mdev(pd->device);
2692 
2693 		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
2694 			if (!ucontext) {
2695 				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
2696 				return ERR_PTR(-EINVAL);
2697 			} else if (!ucontext->cqe_version) {
2698 				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
2699 				return ERR_PTR(-EINVAL);
2700 			}
2701 		}
2702 	} else {
2703 		/* only XRC_TGT and REG_UMR QPs may be created without a PD */
2704 		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
2705 		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
2706 			pr_warn("%s: no PD for transport %s\n", __func__,
2707 				ib_qp_type_str(init_attr->qp_type));
2708 			return ERR_PTR(-EINVAL);
2709 		}
2710 		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
2711 	}
2712 
2713 	if (init_attr->qp_type == IB_QPT_DRIVER) {
2714 		struct mlx5_ib_create_qp ucmd;
2715 
2716 		init_attr = &mlx_init_attr;
2717 		memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
2718 		err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
2719 		if (err)
2720 			return ERR_PTR(err);
2721 
2722 		if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
2723 			if (init_attr->cap.max_recv_wr ||
2724 			    init_attr->cap.max_recv_sge) {
2725 				mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
2726 				return ERR_PTR(-EINVAL);
2727 			}
2728 		} else {
2729 			return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
2730 		}
2731 	}
2732 
2733 	switch (init_attr->qp_type) {
2734 	case IB_QPT_XRC_TGT:
2735 	case IB_QPT_XRC_INI:
2736 		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
2737 			mlx5_ib_dbg(dev, "XRC not supported\n");
2738 			return ERR_PTR(-ENOSYS);
2739 		}
2740 		init_attr->recv_cq = NULL;
2741 		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
2742 			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
2743 			init_attr->send_cq = NULL;
2744 		}
2745 
2746 		/* fall through */
2747 	case IB_QPT_RAW_PACKET:
2748 	case IB_QPT_RC:
2749 	case IB_QPT_UC:
2750 	case IB_QPT_UD:
2751 	case IB_QPT_SMI:
2752 	case MLX5_IB_QPT_HW_GSI:
2753 	case MLX5_IB_QPT_REG_UMR:
2754 	case MLX5_IB_QPT_DCI:
2755 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2756 		if (!qp)
2757 			return ERR_PTR(-ENOMEM);
2758 
2759 		err = create_qp_common(dev, pd, init_attr, udata, qp);
2760 		if (err) {
2761 			mlx5_ib_dbg(dev, "create_qp_common failed\n");
2762 			kfree(qp);
2763 			return ERR_PTR(err);
2764 		}
2765 
2766 		if (is_qp0(init_attr->qp_type))
2767 			qp->ibqp.qp_num = 0;
2768 		else if (is_qp1(init_attr->qp_type))
2769 			qp->ibqp.qp_num = 1;
2770 		else
2771 			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
2772 
2773 		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
2774 			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
2775 			    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
2776 			    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
2777 
2778 		qp->trans_qp.xrcdn = xrcdn;
2779 
2780 		break;
2781 
2782 	case IB_QPT_GSI:
2783 		return mlx5_ib_gsi_create_qp(pd, init_attr);
2784 
2785 	case IB_QPT_RAW_IPV6:
2786 	case IB_QPT_RAW_ETHERTYPE:
2787 	case IB_QPT_MAX:
2788 	default:
2789 		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
2790 			    init_attr->qp_type);
2791 		/* Don't support raw QPs */
2792 		return ERR_PTR(-EINVAL);
2793 	}
2794 
2795 	if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
2796 		qp->qp_sub_type = init_attr->qp_type;
2797 
2798 	return &qp->ibqp;
2799 }
2800 
2801 static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
2802 {
2803 	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
2804 
2805 	if (mqp->state == IB_QPS_RTR) {
2806 		int err;
2807 
2808 		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
2809 		if (err) {
2810 			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
2811 			return err;
2812 		}
2813 	}
2814 
2815 	kfree(mqp->dct.in);
2816 	kfree(mqp);
2817 	return 0;
2818 }
2819 
2820 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
2821 {
2822 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
2823 	struct mlx5_ib_qp *mqp = to_mqp(qp);
2824 
2825 	if (unlikely(qp->qp_type == IB_QPT_GSI))
2826 		return mlx5_ib_gsi_destroy_qp(qp);
2827 
2828 	if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
2829 		return mlx5_ib_destroy_dct(mqp);
2830 
2831 	destroy_qp_common(dev, mqp, udata);
2832 
2833 	kfree(mqp);
2834 
2835 	return 0;
2836 }
2837 
2838 static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
2839 				const struct ib_qp_attr *attr,
2840 				int attr_mask, __be32 *hw_access_flags_be)
2841 {
2842 	u8 dest_rd_atomic;
2843 	u32 access_flags, hw_access_flags = 0;
2844 
2845 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
2846 
2847 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2848 		dest_rd_atomic = attr->max_dest_rd_atomic;
2849 	else
2850 		dest_rd_atomic = qp->trans_qp.resp_depth;
2851 
2852 	if (attr_mask & IB_QP_ACCESS_FLAGS)
2853 		access_flags = attr->qp_access_flags;
2854 	else
2855 		access_flags = qp->trans_qp.atomic_rd_en;
2856 
2857 	if (!dest_rd_atomic)
2858 		access_flags &= IB_ACCESS_REMOTE_WRITE;
2859 
2860 	if (access_flags & IB_ACCESS_REMOTE_READ)
2861 		hw_access_flags |= MLX5_QP_BIT_RRE;
2862 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
2863 		int atomic_mode;
2864 
2865 		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
2866 		if (atomic_mode < 0)
2867 			return -EOPNOTSUPP;
2868 
2869 		hw_access_flags |= MLX5_QP_BIT_RAE;
2870 		hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
2871 	}
2872 
2873 	if (access_flags & IB_ACCESS_REMOTE_WRITE)
2874 		hw_access_flags |= MLX5_QP_BIT_RWE;
2875 
2876 	*hw_access_flags_be = cpu_to_be32(hw_access_flags);
2877 
2878 	return 0;
2879 }
2880 
2881 enum {
2882 	MLX5_PATH_FLAG_FL	= 1 << 0,
2883 	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
2884 	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
2885 };
2886 
2887 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
2888 {
2889 	if (rate == IB_RATE_PORT_CURRENT)
2890 		return 0;
2891 
2892 	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
2893 		return -EINVAL;
2894 
2895 	while (rate != IB_RATE_PORT_CURRENT &&
2896 	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
2897 		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
2898 		--rate;
2899 
2900 	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
2901 }
2902 
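/*
 * Raw packet QPs do not carry the IB service level in their QP context;
 * it is programmed as the priority of the SQ's TIS via MODIFY_TIS instead.
 */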
2903 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
2904 				      struct mlx5_ib_sq *sq, u8 sl,
2905 				      struct ib_pd *pd)
2906 {
2907 	void *in;
2908 	void *tisc;
2909 	int inlen;
2910 	int err;
2911 
2912 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
2913 	in = kvzalloc(inlen, GFP_KERNEL);
2914 	if (!in)
2915 		return -ENOMEM;
2916 
2917 	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
2918 	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
2919 
2920 	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
2921 	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
2922 
2923 	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
2924 
2925 	kvfree(in);
2926 
2927 	return err;
2928 }
2929 
2930 static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
2931 					 struct mlx5_ib_sq *sq, u8 tx_affinity,
2932 					 struct ib_pd *pd)
2933 {
2934 	void *in;
2935 	void *tisc;
2936 	int inlen;
2937 	int err;
2938 
2939 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
2940 	in = kvzalloc(inlen, GFP_KERNEL);
2941 	if (!in)
2942 		return -ENOMEM;
2943 
2944 	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
2945 	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
2946 
2947 	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
2948 	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
2949 
2950 	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
2951 
2952 	kvfree(in);
2953 
2954 	return err;
2955 }
2956 
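/*
 * Fill a hardware address path from an rdma_ah_attr.  RoCE paths take the
 * DMAC, UDP source port and DSCP from the AH, while native IB paths use
 * the LID and path bits; GRH fields and the static rate are common to both.
 */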
2957 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2958 			 const struct rdma_ah_attr *ah,
2959 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
2960 			 u32 path_flags, const struct ib_qp_attr *attr,
2961 			 bool alt)
2962 {
2963 	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
2964 	int err;
2965 	enum ib_gid_type gid_type;
2966 	u8 ah_flags = rdma_ah_get_ah_flags(ah);
2967 	u8 sl = rdma_ah_get_sl(ah);
2968 
2969 	if (attr_mask & IB_QP_PKEY_INDEX)
2970 		path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
2971 						     attr->pkey_index);
2972 
2973 	if (ah_flags & IB_AH_GRH) {
2974 		if (grh->sgid_index >=
2975 		    dev->mdev->port_caps[port - 1].gid_table_len) {
2976 			pr_err("sgid_index (%u) too large. max is %d\n",
2977 			       grh->sgid_index,
2978 			       dev->mdev->port_caps[port - 1].gid_table_len);
2979 			return -EINVAL;
2980 		}
2981 	}
2982 
2983 	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
2984 		if (!(ah_flags & IB_AH_GRH))
2985 			return -EINVAL;
2986 
2987 		memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
2988 		if (qp->ibqp.qp_type == IB_QPT_RC ||
2989 		    qp->ibqp.qp_type == IB_QPT_UC ||
2990 		    qp->ibqp.qp_type == IB_QPT_XRC_INI ||
2991 		    qp->ibqp.qp_type == IB_QPT_XRC_TGT)
2992 			path->udp_sport =
2993 				mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
2994 		path->dci_cfi_prio_sl = (sl & 0x7) << 4;
2995 		gid_type = ah->grh.sgid_attr->gid_type;
2996 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
2997 			path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
2998 	} else {
2999 		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
3000 		path->fl_free_ar |=
3001 			(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
3002 		path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
3003 		path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
3004 		if (ah_flags & IB_AH_GRH)
3005 			path->grh_mlid	|= 1 << 7;
3006 		path->dci_cfi_prio_sl = sl & 0xf;
3007 	}
3008 
3009 	if (ah_flags & IB_AH_GRH) {
3010 		path->mgid_index = grh->sgid_index;
3011 		path->hop_limit  = grh->hop_limit;
3012 		path->tclass_flowlabel =
3013 			cpu_to_be32((grh->traffic_class << 20) |
3014 				    (grh->flow_label));
3015 		memcpy(path->rgid, grh->dgid.raw, 16);
3016 	}
3017 
3018 	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
3019 	if (err < 0)
3020 		return err;
3021 	path->static_rate = err;
3022 	path->port = port;
3023 
3024 	if (attr_mask & IB_QP_TIMEOUT)
3025 		path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
3026 
3027 	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
3028 		return modify_raw_packet_eth_prio(dev->mdev,
3029 						  &qp->raw_packet_qp.sq,
3030 						  sl & 0xf, qp->ibqp.pd);
3031 
3032 	return 0;
3033 }
3034 
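/*
 * For every (current state, new state, transport) combination this table
 * lists the optional QP parameters the firmware may modify in that
 * transition; the modify path masks the bits derived from the verbs
 * attr_mask against it.
 */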
3035 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
3036 	[MLX5_QP_STATE_INIT] = {
3037 		[MLX5_QP_STATE_INIT] = {
3038 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
3039 					  MLX5_QP_OPTPAR_RAE		|
3040 					  MLX5_QP_OPTPAR_RWE		|
3041 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3042 					  MLX5_QP_OPTPAR_PRI_PORT,
3043 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
3044 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3045 					  MLX5_QP_OPTPAR_PRI_PORT,
3046 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
3047 					  MLX5_QP_OPTPAR_Q_KEY		|
3048 					  MLX5_QP_OPTPAR_PRI_PORT,
3049 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
3050 					  MLX5_QP_OPTPAR_RAE		|
3051 					  MLX5_QP_OPTPAR_RWE		|
3052 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3053 					  MLX5_QP_OPTPAR_PRI_PORT,
3054 		},
3055 		[MLX5_QP_STATE_RTR] = {
3056 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3057 					  MLX5_QP_OPTPAR_RRE            |
3058 					  MLX5_QP_OPTPAR_RAE            |
3059 					  MLX5_QP_OPTPAR_RWE            |
3060 					  MLX5_QP_OPTPAR_PKEY_INDEX,
3061 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3062 					  MLX5_QP_OPTPAR_RWE            |
3063 					  MLX5_QP_OPTPAR_PKEY_INDEX,
3064 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
3065 					  MLX5_QP_OPTPAR_Q_KEY,
3066 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
3067 					   MLX5_QP_OPTPAR_Q_KEY,
3068 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
3069 					  MLX5_QP_OPTPAR_RRE            |
3070 					  MLX5_QP_OPTPAR_RAE            |
3071 					  MLX5_QP_OPTPAR_RWE            |
3072 					  MLX5_QP_OPTPAR_PKEY_INDEX,
3073 		},
3074 	},
3075 	[MLX5_QP_STATE_RTR] = {
3076 		[MLX5_QP_STATE_RTS] = {
3077 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3078 					  MLX5_QP_OPTPAR_RRE		|
3079 					  MLX5_QP_OPTPAR_RAE		|
3080 					  MLX5_QP_OPTPAR_RWE		|
3081 					  MLX5_QP_OPTPAR_PM_STATE	|
3082 					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
3083 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3084 					  MLX5_QP_OPTPAR_RWE		|
3085 					  MLX5_QP_OPTPAR_PM_STATE,
3086 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
3087 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3088 					  MLX5_QP_OPTPAR_RRE		|
3089 					  MLX5_QP_OPTPAR_RAE		|
3090 					  MLX5_QP_OPTPAR_RWE		|
3091 					  MLX5_QP_OPTPAR_PM_STATE	|
3092 					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
3093 		},
3094 	},
3095 	[MLX5_QP_STATE_RTS] = {
3096 		[MLX5_QP_STATE_RTS] = {
3097 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
3098 					  MLX5_QP_OPTPAR_RAE		|
3099 					  MLX5_QP_OPTPAR_RWE		|
3100 					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3101 					  MLX5_QP_OPTPAR_PM_STATE	|
3102 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3103 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
3104 					  MLX5_QP_OPTPAR_PM_STATE	|
3105 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3106 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
3107 					  MLX5_QP_OPTPAR_SRQN		|
3108 					  MLX5_QP_OPTPAR_CQN_RCV,
3109 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
3110 					  MLX5_QP_OPTPAR_RAE		|
3111 					  MLX5_QP_OPTPAR_RWE		|
3112 					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3113 					  MLX5_QP_OPTPAR_PM_STATE	|
3114 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3115 		},
3116 	},
3117 	[MLX5_QP_STATE_SQER] = {
3118 		[MLX5_QP_STATE_RTS] = {
3119 			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
3120 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
3121 			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
3122 			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3123 					   MLX5_QP_OPTPAR_RWE		|
3124 					   MLX5_QP_OPTPAR_RAE		|
3125 					   MLX5_QP_OPTPAR_RRE,
3126 			[MLX5_QP_ST_XRC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3127 					   MLX5_QP_OPTPAR_RWE		|
3128 					   MLX5_QP_OPTPAR_RAE		|
3129 					   MLX5_QP_OPTPAR_RRE,
3130 		},
3131 	},
3132 };
3133 
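/*
 * Map a single IB_QP_* attribute bit onto the MLX5 optional-parameter bits
 * it controls; ib_mask_to_mlx5_opt() below ORs the result for every bit
 * set in the verbs attr_mask.
 */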
3134 static int ib_nr_to_mlx5_nr(int ib_mask)
3135 {
3136 	switch (ib_mask) {
3137 	case IB_QP_STATE:
3138 		return 0;
3139 	case IB_QP_CUR_STATE:
3140 		return 0;
3141 	case IB_QP_EN_SQD_ASYNC_NOTIFY:
3142 		return 0;
3143 	case IB_QP_ACCESS_FLAGS:
3144 		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
3145 			MLX5_QP_OPTPAR_RAE;
3146 	case IB_QP_PKEY_INDEX:
3147 		return MLX5_QP_OPTPAR_PKEY_INDEX;
3148 	case IB_QP_PORT:
3149 		return MLX5_QP_OPTPAR_PRI_PORT;
3150 	case IB_QP_QKEY:
3151 		return MLX5_QP_OPTPAR_Q_KEY;
3152 	case IB_QP_AV:
3153 		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
3154 			MLX5_QP_OPTPAR_PRI_PORT;
3155 	case IB_QP_PATH_MTU:
3156 		return 0;
3157 	case IB_QP_TIMEOUT:
3158 		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
3159 	case IB_QP_RETRY_CNT:
3160 		return MLX5_QP_OPTPAR_RETRY_COUNT;
3161 	case IB_QP_RNR_RETRY:
3162 		return MLX5_QP_OPTPAR_RNR_RETRY;
3163 	case IB_QP_RQ_PSN:
3164 		return 0;
3165 	case IB_QP_MAX_QP_RD_ATOMIC:
3166 		return MLX5_QP_OPTPAR_SRA_MAX;
3167 	case IB_QP_ALT_PATH:
3168 		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
3169 	case IB_QP_MIN_RNR_TIMER:
3170 		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
3171 	case IB_QP_SQ_PSN:
3172 		return 0;
3173 	case IB_QP_MAX_DEST_RD_ATOMIC:
3174 		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
3175 			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
3176 	case IB_QP_PATH_MIG_STATE:
3177 		return MLX5_QP_OPTPAR_PM_STATE;
3178 	case IB_QP_CAP:
3179 		return 0;
3180 	case IB_QP_DEST_QPN:
3181 		return 0;
3182 	}
3183 	return 0;
3184 }
3185 
3186 static int ib_mask_to_mlx5_opt(int ib_mask)
3187 {
3188 	int result = 0;
3189 	int i;
3190 
3191 	for (i = 0; i < 8 * sizeof(int); i++) {
3192 		if ((1 << i) & ib_mask)
3193 			result |= ib_nr_to_mlx5_nr(1 << i);
3194 	}
3195 
3196 	return result;
3197 }
3198 
3199 static int modify_raw_packet_qp_rq(
3200 	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
3201 	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3202 {
3203 	void *in;
3204 	void *rqc;
3205 	int inlen;
3206 	int err;
3207 
3208 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
3209 	in = kvzalloc(inlen, GFP_KERNEL);
3210 	if (!in)
3211 		return -ENOMEM;
3212 
3213 	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
3214 	MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
3215 
3216 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
3217 	MLX5_SET(rqc, rqc, state, new_state);
3218 
3219 	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
3220 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
3221 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
3222 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
3223 			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
3224 		} else
3225 			dev_info_once(
3226 				&dev->ib_dev.dev,
3227 				"RAW PACKET QP counters are not supported on current FW\n");
3228 	}
3229 
3230 	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
3231 	if (err)
3232 		goto out;
3233 
3234 	rq->state = new_state;
3235 
3236 out:
3237 	kvfree(in);
3238 	return err;
3239 }
3240 
3241 static int modify_raw_packet_qp_sq(
3242 	struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
3243 	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3244 {
3245 	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
3246 	struct mlx5_rate_limit old_rl = ibqp->rl;
3247 	struct mlx5_rate_limit new_rl = old_rl;
3248 	bool new_rate_added = false;
3249 	u16 rl_index = 0;
3250 	void *in;
3251 	void *sqc;
3252 	int inlen;
3253 	int err;
3254 
3255 	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
3256 	in = kvzalloc(inlen, GFP_KERNEL);
3257 	if (!in)
3258 		return -ENOMEM;
3259 
3260 	MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
3261 	MLX5_SET(modify_sq_in, in, sq_state, sq->state);
3262 
3263 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
3264 	MLX5_SET(sqc, sqc, state, new_state);
3265 
3266 	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
3267 		if (new_state != MLX5_SQC_STATE_RDY)
3268 			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
3269 				__func__);
3270 		else
3271 			new_rl = raw_qp_param->rl;
3272 	}
3273 
3274 	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
3275 		if (new_rl.rate) {
3276 			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
3277 			if (err) {
3278 				pr_err("Failed configuring rate limit (err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
3279 				       err, new_rl.rate, new_rl.max_burst_sz,
3280 				       new_rl.typical_pkt_sz);
3282 
3283 				goto out;
3284 			}
3285 			new_rate_added = true;
3286 		}
3287 
3288 		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
3289 		/* index 0 means no limit */
3290 		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
3291 	}
3292 
3293 	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
3294 	if (err) {
3295 		/* Remove new rate from table if failed */
3296 		if (new_rate_added)
3297 			mlx5_rl_remove_rate(dev, &new_rl);
3298 		goto out;
3299 	}
3300 
3301 	/* Only remove the old rate after new rate was set */
3302 	if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
3303 	    (new_state != MLX5_SQC_STATE_RDY)) {
3304 		mlx5_rl_remove_rate(dev, &old_rl);
3305 		if (new_state != MLX5_SQC_STATE_RDY)
3306 			memset(&new_rl, 0, sizeof(new_rl));
3307 	}
3308 
3309 	ibqp->rl = new_rl;
3310 	sq->state = new_state;
3311 
3312 out:
3313 	kvfree(in);
3314 	return err;
3315 }
3316 
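/*
 * Raw packet QPs have no single firmware QP object; a verbs state change
 * is translated into separate MODIFY_RQ and MODIFY_SQ commands (plus
 * optional TIS affinity and rate-limit updates) on the underlying RQ and SQ.
 */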
3317 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3318 				const struct mlx5_modify_raw_qp_param *raw_qp_param,
3319 				u8 tx_affinity)
3320 {
3321 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
3322 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
3323 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
3324 	int modify_rq = !!qp->rq.wqe_cnt;
3325 	int modify_sq = !!qp->sq.wqe_cnt;
3326 	int rq_state;
3327 	int sq_state;
3328 	int err;
3329 
3330 	switch (raw_qp_param->operation) {
3331 	case MLX5_CMD_OP_RST2INIT_QP:
3332 		rq_state = MLX5_RQC_STATE_RDY;
3333 		sq_state = MLX5_SQC_STATE_RDY;
3334 		break;
3335 	case MLX5_CMD_OP_2ERR_QP:
3336 		rq_state = MLX5_RQC_STATE_ERR;
3337 		sq_state = MLX5_SQC_STATE_ERR;
3338 		break;
3339 	case MLX5_CMD_OP_2RST_QP:
3340 		rq_state = MLX5_RQC_STATE_RST;
3341 		sq_state = MLX5_SQC_STATE_RST;
3342 		break;
3343 	case MLX5_CMD_OP_RTR2RTS_QP:
3344 	case MLX5_CMD_OP_RTS2RTS_QP:
3345 		if (raw_qp_param->set_mask ==
3346 		    MLX5_RAW_QP_RATE_LIMIT) {
3347 			modify_rq = 0;
3348 			sq_state = sq->state;
3349 		} else {
3350 			return raw_qp_param->set_mask ? -EINVAL : 0;
3351 		}
3352 		break;
3353 	case MLX5_CMD_OP_INIT2INIT_QP:
3354 	case MLX5_CMD_OP_INIT2RTR_QP:
3355 		if (raw_qp_param->set_mask)
3356 			return -EINVAL;
3357 		else
3358 			return 0;
3359 	default:
3360 		WARN_ON(1);
3361 		return -EINVAL;
3362 	}
3363 
3364 	if (modify_rq) {
3365 		err =  modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
3366 					       qp->ibqp.pd);
3367 		if (err)
3368 			return err;
3369 	}
3370 
3371 	if (modify_sq) {
3372 		struct mlx5_flow_handle *flow_rule;
3373 
3374 		if (tx_affinity) {
3375 			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
3376 							    tx_affinity,
3377 							    qp->ibqp.pd);
3378 			if (err)
3379 				return err;
3380 		}
3381 
3382 		flow_rule = create_flow_rule_vport_sq(dev, sq,
3383 						      raw_qp_param->port);
3384 		if (IS_ERR(flow_rule))
3385 			return PTR_ERR(flow_rule);
3386 
3387 		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
3388 					      raw_qp_param, qp->ibqp.pd);
3389 		if (err) {
3390 			if (flow_rule)
3391 				mlx5_del_flow_rules(flow_rule);
3392 			return err;
3393 		}
3394 
3395 		if (flow_rule) {
3396 			destroy_flow_rule_vport_sq(sq);
3397 			sq->flow_rule = flow_rule;
3398 		}
3399 
3400 		return err;
3401 	}
3402 
3403 	return 0;
3404 }
3405 
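/*
 * Under LAG, spread QPs across the physical ports by round-robin: an
 * atomic counter (per ucontext for user QPs, per port for kernel QPs) is
 * folded into the range 1..MLX5_MAX_PORTS and used as the tx port affinity.
 */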
3406 static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
3407 				    struct mlx5_ib_pd *pd,
3408 				    struct mlx5_ib_qp_base *qp_base,
3409 				    u8 port_num, struct ib_udata *udata)
3410 {
3411 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
3412 		udata, struct mlx5_ib_ucontext, ibucontext);
3413 	unsigned int tx_port_affinity;
3414 
3415 	if (ucontext) {
3416 		tx_port_affinity = (unsigned int)atomic_add_return(
3417 					   1, &ucontext->tx_port_affinity) %
3418 					   MLX5_MAX_PORTS +
3419 				   1;
3420 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
3421 				tx_port_affinity, qp_base->mqp.qpn, ucontext);
3422 	} else {
3423 		tx_port_affinity =
3424 			(unsigned int)atomic_add_return(
3425 				1, &dev->port[port_num].roce.tx_port_affinity) %
3426 				MLX5_MAX_PORTS +
3427 			1;
3428 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
3429 				tx_port_affinity, qp_base->mqp.qpn);
3430 	}
3431 
3432 	return tx_port_affinity;
3433 }
3434 
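/*
 * Rebind the QP to a counter set while it stays in RTS: the counter id is
 * written into the top byte of qp_counter_set_usr_page and pushed to the
 * firmware with an RTS2RTS modify that enables only COUNTER_SET_ID.
 */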
3435 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
3436 				    struct rdma_counter *counter)
3437 {
3438 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
3439 	struct mlx5_ib_qp *mqp = to_mqp(qp);
3440 	struct mlx5_qp_context context = {};
3441 	struct mlx5_ib_qp_base *base;
3442 	u32 set_id;
3443 
3444 	if (counter)
3445 		set_id = counter->id;
3446 	else
3447 		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
3448 
3449 	base = &mqp->trans_qp.base;
3450 	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
3451 	context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
3452 	return mlx5_core_qp_modify(dev->mdev,
3453 				   MLX5_CMD_OP_RTS2RTS_QP,
3454 				   MLX5_QP_OPTPAR_COUNTER_SET_ID,
3455 				   &context, &base->mqp);
3456 }
3457 
3458 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
3459 			       const struct ib_qp_attr *attr, int attr_mask,
3460 			       enum ib_qp_state cur_state,
3461 			       enum ib_qp_state new_state,
3462 			       const struct mlx5_ib_modify_qp *ucmd,
3463 			       struct ib_udata *udata)
3464 {
3465 	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
3466 		[MLX5_QP_STATE_RST] = {
3467 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3468 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3469 			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
3470 		},
3471 		[MLX5_QP_STATE_INIT]  = {
3472 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3473 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3474 			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
3475 			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
3476 		},
3477 		[MLX5_QP_STATE_RTR]   = {
3478 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3479 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3480 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
3481 		},
3482 		[MLX5_QP_STATE_RTS]   = {
3483 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3484 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3485 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
3486 		},
3487 		[MLX5_QP_STATE_SQD] = {
3488 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3489 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3490 		},
3491 		[MLX5_QP_STATE_SQER] = {
3492 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3493 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3494 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
3495 		},
3496 		[MLX5_QP_STATE_ERR] = {
3497 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
3498 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
3499 		}
3500 	};
3501 
3502 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3503 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
3504 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
3505 	struct mlx5_ib_cq *send_cq, *recv_cq;
3506 	struct mlx5_qp_context *context;
3507 	struct mlx5_ib_pd *pd;
3508 	enum mlx5_qp_state mlx5_cur, mlx5_new;
3509 	enum mlx5_qp_optpar optpar;
3510 	u32 set_id = 0;
3511 	int mlx5_st;
3512 	int err;
3513 	u16 op;
3514 	u8 tx_affinity = 0;
3515 
3516 	mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
3517 			     qp->qp_sub_type : ibqp->qp_type);
3518 	if (mlx5_st < 0)
3519 		return -EINVAL;
3520 
3521 	context = kzalloc(sizeof(*context), GFP_KERNEL);
3522 	if (!context)
3523 		return -ENOMEM;
3524 
3525 	pd = get_pd(qp);
3526 	context->flags = cpu_to_be32(mlx5_st << 16);
3527 
3528 	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
3529 		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
3530 	} else {
3531 		switch (attr->path_mig_state) {
3532 		case IB_MIG_MIGRATED:
3533 			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
3534 			break;
3535 		case IB_MIG_REARM:
3536 			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
3537 			break;
3538 		case IB_MIG_ARMED:
3539 			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
3540 			break;
3541 		}
3542 	}
3543 
3544 	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
3545 		if ((ibqp->qp_type == IB_QPT_RC) ||
3546 		    (ibqp->qp_type == IB_QPT_UD &&
3547 		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
3548 		    (ibqp->qp_type == IB_QPT_UC) ||
3549 		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
3550 		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
3551 		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
3552 			if (dev->lag_active) {
3553 				u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
3554 				tx_affinity = get_tx_affinity(dev, pd, base, p,
3555 							      udata);
3556 				context->flags |= cpu_to_be32(tx_affinity << 24);
3557 			}
3558 		}
3559 	}
3560 
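	/* mtu_msgmax packs the path MTU in bits 7:5 and the log of the
	 * maximum message size in bits 4:0.
	 */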
3561 	if (is_sqp(ibqp->qp_type)) {
3562 		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
3563 	} else if ((ibqp->qp_type == IB_QPT_UD &&
3564 		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
3565 		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
3566 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
3567 	} else if (attr_mask & IB_QP_PATH_MTU) {
3568 		if (attr->path_mtu < IB_MTU_256 ||
3569 		    attr->path_mtu > IB_MTU_4096) {
3570 			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
3571 			err = -EINVAL;
3572 			goto out;
3573 		}
3574 		context->mtu_msgmax = (attr->path_mtu << 5) |
3575 				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
3576 	}
3577 
3578 	if (attr_mask & IB_QP_DEST_QPN)
3579 		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
3580 
3581 	if (attr_mask & IB_QP_PKEY_INDEX)
3582 		context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
3583 
3584 	/* TODO: implement counter_index functionality */
3585 
3586 	if (is_sqp(ibqp->qp_type))
3587 		context->pri_path.port = qp->port;
3588 
3589 	if (attr_mask & IB_QP_PORT)
3590 		context->pri_path.port = attr->port_num;
3591 
3592 	if (attr_mask & IB_QP_AV) {
3593 		err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
3594 				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
3595 				    attr_mask, 0, attr, false);
3596 		if (err)
3597 			goto out;
3598 	}
3599 
3600 	if (attr_mask & IB_QP_TIMEOUT)
3601 		context->pri_path.ackto_lt |= attr->timeout << 3;
3602 
3603 	if (attr_mask & IB_QP_ALT_PATH) {
3604 		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
3605 				    &context->alt_path,
3606 				    attr->alt_port_num,
3607 				    attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
3608 				    0, attr, true);
3609 		if (err)
3610 			goto out;
3611 	}
3612 
3613 	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
3614 		&send_cq, &recv_cq);
3615 
3616 	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
3617 	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
3618 	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
3619 	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
3620 
3621 	if (attr_mask & IB_QP_RNR_RETRY)
3622 		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
3623 
3624 	if (attr_mask & IB_QP_RETRY_CNT)
3625 		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
3626 
3627 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
3628 		if (attr->max_rd_atomic)
3629 			context->params1 |=
3630 				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
3631 	}
3632 
3633 	if (attr_mask & IB_QP_SQ_PSN)
3634 		context->next_send_psn = cpu_to_be32(attr->sq_psn);
3635 
3636 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3637 		if (attr->max_dest_rd_atomic)
3638 			context->params2 |=
3639 				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
3640 	}
3641 
3642 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
3643 		__be32 access_flags;
3644 
3645 		err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
3646 		if (err)
3647 			goto out;
3648 
3649 		context->params2 |= access_flags;
3650 	}
3651 
3652 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
3653 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
3654 
3655 	if (attr_mask & IB_QP_RQ_PSN)
3656 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
3657 
3658 	if (attr_mask & IB_QP_QKEY)
3659 		context->qkey = cpu_to_be32(attr->qkey);
3660 
3661 	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
3662 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
3663 
3664 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3665 		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
3666 			       qp->port) - 1;
3667 
3668 		/* For an underlay QP, use index 0 - one function per port */
3669 		if (qp->flags & MLX5_IB_QP_UNDERLAY)
3670 			port_num = 0;
3671 
3672 		if (ibqp->counter)
3673 			set_id = ibqp->counter->id;
3674 		else
3675 			set_id = mlx5_ib_get_counters_id(dev, port_num);
3676 		context->qp_counter_set_usr_page |=
3677 			cpu_to_be32(set_id << 24);
3678 	}
3679 
3680 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
3681 		context->sq_crq_size |= cpu_to_be16(1 << 4);
3682 
3683 	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
3684 		context->deth_sqpn = cpu_to_be32(1);
3685 
3686 	mlx5_cur = to_mlx5_state(cur_state);
3687 	mlx5_new = to_mlx5_state(new_state);
3688 
3689 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
3690 	    !optab[mlx5_cur][mlx5_new]) {
3691 		err = -EINVAL;
3692 		goto out;
3693 	}
3694 
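	/* Look up the firmware command for this transition and restrict the
	 * optional-parameter bits to those valid for it.
	 */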
3695 	op = optab[mlx5_cur][mlx5_new];
3696 	optpar = ib_mask_to_mlx5_opt(attr_mask);
3697 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
3698 
3699 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
3700 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
3701 		struct mlx5_modify_raw_qp_param raw_qp_param = {};
3702 
3703 		raw_qp_param.operation = op;
3704 		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3705 			raw_qp_param.rq_q_ctr_id = set_id;
3706 			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
3707 		}
3708 
3709 		if (attr_mask & IB_QP_PORT)
3710 			raw_qp_param.port = attr->port_num;
3711 
3712 		if (attr_mask & IB_QP_RATE_LIMIT) {
3713 			raw_qp_param.rl.rate = attr->rate_limit;
3714 
3715 			if (ucmd->burst_info.max_burst_sz) {
3716 				if (attr->rate_limit &&
3717 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
3718 					raw_qp_param.rl.max_burst_sz =
3719 						ucmd->burst_info.max_burst_sz;
3720 				} else {
3721 					err = -EINVAL;
3722 					goto out;
3723 				}
3724 			}
3725 
3726 			if (ucmd->burst_info.typical_pkt_sz) {
3727 				if (attr->rate_limit &&
3728 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
3729 					raw_qp_param.rl.typical_pkt_sz =
3730 						ucmd->burst_info.typical_pkt_sz;
3731 				} else {
3732 					err = -EINVAL;
3733 					goto out;
3734 				}
3735 			}
3736 
3737 			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
3738 		}
3739 
3740 		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
3741 	} else {
3742 		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
3743 					  &base->mqp);
3744 	}
3745 
3746 	if (err)
3747 		goto out;
3748 
3749 	qp->state = new_state;
3750 
3751 	if (attr_mask & IB_QP_ACCESS_FLAGS)
3752 		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
3753 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3754 		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
3755 	if (attr_mask & IB_QP_PORT)
3756 		qp->port = attr->port_num;
3757 	if (attr_mask & IB_QP_ALT_PATH)
3758 		qp->trans_qp.alt_port = attr->alt_port_num;
3759 
3760 	/*
3761 	 * If we moved a kernel QP to RESET, clean up all old CQ
3762 	 * entries and reinitialize the QP.
3763 	 */
3764 	if (new_state == IB_QPS_RESET &&
3765 	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
3766 		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
3767 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
3768 		if (send_cq != recv_cq)
3769 			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
3770 
3771 		qp->rq.head = 0;
3772 		qp->rq.tail = 0;
3773 		qp->sq.head = 0;
3774 		qp->sq.tail = 0;
3775 		qp->sq.cur_post = 0;
3776 		if (qp->sq.wqe_cnt)
3777 			qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
3778 		qp->db.db[MLX5_RCV_DBR] = 0;
3779 		qp->db.db[MLX5_SND_DBR] = 0;
3780 	}
3781 
3782 	if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
3783 		err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
3784 		if (!err)
3785 			qp->counter_pending = 0;
3786 	}
3787 
3788 out:
3789 	kfree(context);
3790 	return err;
3791 }
3792 
3793 static inline bool is_valid_mask(int mask, int req, int opt)
3794 {
3795 	if ((mask & req) != req)
3796 		return false;
3797 
3798 	if (mask & ~(req | opt))
3799 		return false;
3800 
3801 	return true;
3802 }
3803 
3804 /* Check that a state transition is valid for driver QP types.
3805  * For now, the only driver QP type this function supports is DCI.
3806  */
3807 static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
3808 				enum ib_qp_attr_mask attr_mask)
3809 {
3810 	int req = IB_QP_STATE;
3811 	int opt = 0;
3812 
3813 	if (new_state == IB_QPS_RESET) {
3814 		return is_valid_mask(attr_mask, req, opt);
3815 	} else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3816 		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
3817 		return is_valid_mask(attr_mask, req, opt);
3818 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3819 		opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
3820 		return is_valid_mask(attr_mask, req, opt);
3821 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3822 		req |= IB_QP_PATH_MTU;
3823 		opt = IB_QP_PKEY_INDEX | IB_QP_AV;
3824 		return is_valid_mask(attr_mask, req, opt);
3825 	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3826 		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3827 		       IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
3828 		opt = IB_QP_MIN_RNR_TIMER;
3829 		return is_valid_mask(attr_mask, req, opt);
3830 	} else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
3831 		opt = IB_QP_MIN_RNR_TIMER;
3832 		return is_valid_mask(attr_mask, req, opt);
3833 	} else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
3834 		return is_valid_mask(attr_mask, req, opt);
3835 	}
3836 	return false;
3837 }
3838 
3839 /* mlx5_ib_modify_dct: modify a DCT QP
3840  * valid transitions are:
3841  * RESET to INIT: must set access_flags, pkey_index and port
3842  * INIT  to RTR : must set min_rnr_timer, tclass, flow_label,
3843  *			   mtu, gid_index and hop_limit
3844  * Other transitions and attributes are illegal
3845  */
3846 static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3847 			      int attr_mask, struct ib_udata *udata)
3848 {
3849 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
3850 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3851 	enum ib_qp_state cur_state, new_state;
3852 	int err = 0;
3853 	int required = IB_QP_STATE;
3854 	void *dctc;
3855 
3856 	if (!(attr_mask & IB_QP_STATE))
3857 		return -EINVAL;
3858 
3859 	cur_state = qp->state;
3860 	new_state = attr->qp_state;
3861 
3862 	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
3863 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3864 		u16 set_id;
3865 
3866 		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
3867 		if (!is_valid_mask(attr_mask, required, 0))
3868 			return -EINVAL;
3869 
3870 		if (attr->port_num == 0 ||
3871 		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
3872 			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
3873 				    attr->port_num, dev->num_ports);
3874 			return -EINVAL;
3875 		}
3876 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
3877 			MLX5_SET(dctc, dctc, rre, 1);
3878 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
3879 			MLX5_SET(dctc, dctc, rwe, 1);
3880 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
3881 			int atomic_mode;
3882 
3883 			atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
3884 			if (atomic_mode < 0)
3885 				return -EOPNOTSUPP;
3886 
3887 			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
3888 			MLX5_SET(dctc, dctc, rae, 1);
3889 		}
3890 		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
3891 		MLX5_SET(dctc, dctc, port, attr->port_num);
3892 
3893 		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
3894 		MLX5_SET(dctc, dctc, counter_set_id, set_id);
3895 
3896 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3897 		struct mlx5_ib_modify_qp_resp resp = {};
3898 		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
3899 		u32 min_resp_len = offsetof(typeof(resp), dctn) +
3900 				   sizeof(resp.dctn);
3901 
3902 		if (udata->outlen < min_resp_len)
3903 			return -EINVAL;
3904 		resp.response_length = min_resp_len;
3905 
3906 		required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
3907 		if (!is_valid_mask(attr_mask, required, 0))
3908 			return -EINVAL;
3909 		MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
3910 		MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
3911 		MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
3912 		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
3913 		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
3914 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
3915 
3916 		err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
3917 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
3918 					   sizeof(out));
3919 		if (err)
3920 			return err;
3921 		resp.dctn = qp->dct.mdct.mqp.qpn;
3922 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
3923 		if (err) {
3924 			mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
3925 			return err;
3926 		}
3927 	} else {
3928 		mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
3929 		return -EINVAL;
3930 	}
3931 	if (err)
3932 		qp->state = IB_QPS_ERR;
3933 	else
3934 		qp->state = new_state;
3935 	return err;
3936 }
3937 
3938 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3939 		      int attr_mask, struct ib_udata *udata)
3940 {
3941 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3942 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
3943 	struct mlx5_ib_modify_qp ucmd = {};
3944 	enum ib_qp_type qp_type;
3945 	enum ib_qp_state cur_state, new_state;
3946 	size_t required_cmd_sz;
3947 	int err = -EINVAL;
3948 	int port;
3949 
3950 	if (ibqp->rwq_ind_tbl)
3951 		return -ENOSYS;
3952 
3953 	if (udata && udata->inlen) {
3954 		required_cmd_sz = offsetof(typeof(ucmd), reserved) +
3955 			sizeof(ucmd.reserved);
3956 		if (udata->inlen < required_cmd_sz)
3957 			return -EINVAL;
3958 
3959 		if (udata->inlen > sizeof(ucmd) &&
3960 		    !ib_is_udata_cleared(udata, sizeof(ucmd),
3961 					 udata->inlen - sizeof(ucmd)))
3962 			return -EOPNOTSUPP;
3963 
3964 		if (ib_copy_from_udata(&ucmd, udata,
3965 				       min(udata->inlen, sizeof(ucmd))))
3966 			return -EFAULT;
3967 
3968 		if (ucmd.comp_mask ||
3969 		    memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
3970 		    memchr_inv(&ucmd.burst_info.reserved, 0,
3971 			       sizeof(ucmd.burst_info.reserved)))
3972 			return -EOPNOTSUPP;
3973 	}
3974 
3975 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
3976 		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
3977 
3978 	if (ibqp->qp_type == IB_QPT_DRIVER)
3979 		qp_type = qp->qp_sub_type;
3980 	else
3981 		qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
3982 			IB_QPT_GSI : ibqp->qp_type;
3983 
3984 	if (qp_type == MLX5_IB_QPT_DCT)
3985 		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
3986 
3987 	mutex_lock(&qp->mutex);
3988 
3989 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
3990 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
3991 
3992 	if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
3993 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
3994 	}
3995 
3996 	if (qp->flags & MLX5_IB_QP_UNDERLAY) {
3997 		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
3998 			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
3999 				    attr_mask);
4000 			goto out;
4001 		}
4002 	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
4003 		   qp_type != MLX5_IB_QPT_DCI &&
4004 		   !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
4005 				       attr_mask)) {
4006 		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4007 			    cur_state, new_state, ibqp->qp_type, attr_mask);
4008 		goto out;
4009 	} else if (qp_type == MLX5_IB_QPT_DCI &&
4010 		   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
4011 		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4012 			    cur_state, new_state, qp_type, attr_mask);
4013 		goto out;
4014 	}
4015 
4016 	if ((attr_mask & IB_QP_PORT) &&
4017 	    (attr->port_num == 0 ||
4018 	     attr->port_num > dev->num_ports)) {
4019 		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
4020 			    attr->port_num, dev->num_ports);
4021 		goto out;
4022 	}
4023 
4024 	if (attr_mask & IB_QP_PKEY_INDEX) {
4025 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
4026 		if (attr->pkey_index >=
4027 		    dev->mdev->port_caps[port - 1].pkey_table_len) {
4028 			mlx5_ib_dbg(dev, "invalid pkey index %d\n",
4029 				    attr->pkey_index);
4030 			goto out;
4031 		}
4032 	}
4033 
4034 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
4035 	    attr->max_rd_atomic >
4036 	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
4037 		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
4038 			    attr->max_rd_atomic);
4039 		goto out;
4040 	}
4041 
4042 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
4043 	    attr->max_dest_rd_atomic >
4044 	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
4045 		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
4046 			    attr->max_dest_rd_atomic);
4047 		goto out;
4048 	}
4049 
4050 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
4051 		err = 0;
4052 		goto out;
4053 	}
4054 
4055 	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
4056 				  new_state, &ucmd, udata);
4057 
4058 out:
4059 	mutex_unlock(&qp->mutex);
4060 	return err;
4061 }
4062 
4063 static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
4064 				   u32 wqe_sz, void **cur_edge)
4065 {
4066 	u32 idx;
4067 
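	/* wqe_sz is in units of 16 bytes while cur_post counts 64-byte send
	 * WQE basic blocks (WQEBBs), hence the >> 2 conversion.
	 */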
4068 	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
4069 	*cur_edge = get_sq_edge(sq, idx);
4070 
4071 	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
4072 }
4073 
4074 /* handle_post_send_edge - Check if we reached the SQ edge. If so, update to the
4075  * next nearby edge and get a new address translation for the current WQE position.
4076  * @sq: SQ buffer.
4077  * @seg: Current WQE position (16B aligned).
4078  * @wqe_sz: Total current WQE size [16B].
4079  * @cur_edge: Updated current edge.
4080  */
4081 static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
4082 					 u32 wqe_sz, void **cur_edge)
4083 {
4084 	if (likely(*seg != *cur_edge))
4085 		return;
4086 
4087 	_handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
4088 }
4089 
4090 /* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
4091  * pointers. At the end, @seg is aligned to 16B regardless of the copied size.
4092  * @sq: SQ buffer.
4093  * @cur_edge: Updated current edge.
4094  * @seg: Current WQE position (16B aligned).
4095  * @wqe_sz: Total current WQE size [16B].
4096  * @src: Pointer to copy from.
4097  * @n: Number of bytes to copy.
4098  */
4099 static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
4100 				   void **seg, u32 *wqe_sz, const void *src,
4101 				   size_t n)
4102 {
4103 	while (likely(n)) {
4104 		size_t leftlen = *cur_edge - *seg;
4105 		size_t copysz = min_t(size_t, leftlen, n);
4106 		size_t stride;
4107 
4108 		memcpy(*seg, src, copysz);
4109 
4110 		n -= copysz;
4111 		src += copysz;
4112 		stride = !n ? ALIGN(copysz, 16) : copysz;
4113 		*seg += stride;
4114 		*wqe_sz += stride >> 4;
4115 		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
4116 	}
4117 }
4118 
4119 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
4120 {
4121 	struct mlx5_ib_cq *cq;
4122 	unsigned cur;
4123 
4124 	cur = wq->head - wq->tail;
4125 	if (likely(cur + nreq < wq->max_post))
4126 		return 0;
4127 
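	/* Possible overflow: re-check head - tail under the CQ lock to
	 * serialize against completion processing, which advances the tail.
	 */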
4128 	cq = to_mcq(ib_cq);
4129 	spin_lock(&cq->lock);
4130 	cur = wq->head - wq->tail;
4131 	spin_unlock(&cq->lock);
4132 
4133 	return cur + nreq >= wq->max_post;
4134 }
4135 
4136 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
4137 					  u64 remote_addr, u32 rkey)
4138 {
4139 	rseg->raddr    = cpu_to_be64(remote_addr);
4140 	rseg->rkey     = cpu_to_be32(rkey);
4141 	rseg->reserved = 0;
4142 }
4143 
4144 static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
4145 			void **seg, int *size, void **cur_edge)
4146 {
4147 	struct mlx5_wqe_eth_seg *eseg = *seg;
4148 
4149 	memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
4150 
4151 	if (wr->send_flags & IB_SEND_IP_CSUM)
4152 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
4153 				 MLX5_ETH_WQE_L4_CSUM;
4154 
4155 	if (wr->opcode == IB_WR_LSO) {
4156 		struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
4157 		size_t left, copysz;
4158 		void *pdata = ud_wr->header;
4159 		size_t stride;
4160 
4161 		left = ud_wr->hlen;
4162 		eseg->mss = cpu_to_be16(ud_wr->mss);
4163 		eseg->inline_hdr.sz = cpu_to_be16(left);
4164 
4165 		/* memcpy_send_wqe should get a 16B-aligned address. Hence, we
4166 		 * first copy up to the current edge and then, if needed,
4167 		 * fall through to memcpy_send_wqe.
4168 		 */
4169 		copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
4170 			       left);
4171 		memcpy(eseg->inline_hdr.start, pdata, copysz);
4172 		stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
4173 			       sizeof(eseg->inline_hdr.start) + copysz, 16);
4174 		*size += stride / 16;
4175 		*seg += stride;
4176 
4177 		if (copysz < left) {
4178 			handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4179 			left -= copysz;
4180 			pdata += copysz;
4181 			memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
4182 					left);
4183 		}
4184 
4185 		return;
4186 	}
4187 
4188 	*seg += sizeof(struct mlx5_wqe_eth_seg);
4189 	*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
4190 }
4191 
4192 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
4193 			     const struct ib_send_wr *wr)
4194 {
4195 	memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
4196 	dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
4197 	dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
4198 }
4199 
4200 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
4201 {
4202 	dseg->byte_count = cpu_to_be32(sg->length);
4203 	dseg->lkey       = cpu_to_be32(sg->lkey);
4204 	dseg->addr       = cpu_to_be64(sg->addr);
4205 }
4206 
4207 static u64 get_xlt_octo(u64 bytes)
4208 {
4209 	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
4210 	       MLX5_IB_UMR_OCTOWORD;
4211 }
4212 
4213 static __be64 frwr_mkey_mask(bool atomic)
4214 {
4215 	u64 result;
4216 
4217 	result = MLX5_MKEY_MASK_LEN		|
4218 		MLX5_MKEY_MASK_PAGE_SIZE	|
4219 		MLX5_MKEY_MASK_START_ADDR	|
4220 		MLX5_MKEY_MASK_EN_RINVAL	|
4221 		MLX5_MKEY_MASK_KEY		|
4222 		MLX5_MKEY_MASK_LR		|
4223 		MLX5_MKEY_MASK_LW		|
4224 		MLX5_MKEY_MASK_RR		|
4225 		MLX5_MKEY_MASK_RW		|
4226 		MLX5_MKEY_MASK_SMALL_FENCE	|
4227 		MLX5_MKEY_MASK_FREE;
4228 
4229 	if (atomic)
4230 		result |= MLX5_MKEY_MASK_A;
4231 
4232 	return cpu_to_be64(result);
4233 }
4234 
4235 static __be64 sig_mkey_mask(void)
4236 {
4237 	u64 result;
4238 
4239 	result = MLX5_MKEY_MASK_LEN		|
4240 		MLX5_MKEY_MASK_PAGE_SIZE	|
4241 		MLX5_MKEY_MASK_START_ADDR	|
4242 		MLX5_MKEY_MASK_EN_SIGERR	|
4243 		MLX5_MKEY_MASK_EN_RINVAL	|
4244 		MLX5_MKEY_MASK_KEY		|
4245 		MLX5_MKEY_MASK_LR		|
4246 		MLX5_MKEY_MASK_LW		|
4247 		MLX5_MKEY_MASK_RR		|
4248 		MLX5_MKEY_MASK_RW		|
4249 		MLX5_MKEY_MASK_SMALL_FENCE	|
4250 		MLX5_MKEY_MASK_FREE		|
4251 		MLX5_MKEY_MASK_BSF_EN;
4252 
4253 	return cpu_to_be64(result);
4254 }
4255 
4256 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4257 			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
4258 {
4259 	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4260 
4261 	memset(umr, 0, sizeof(*umr));
4262 
4263 	umr->flags = flags;
4264 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4265 	umr->mkey_mask = frwr_mkey_mask(atomic);
4266 }
4267 
4268 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
4269 {
4270 	memset(umr, 0, sizeof(*umr));
4271 	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
4272 	umr->flags = MLX5_UMR_INLINE;
4273 }
4274 
4275 static __be64 get_umr_enable_mr_mask(void)
4276 {
4277 	u64 result;
4278 
4279 	result = MLX5_MKEY_MASK_KEY |
4280 		 MLX5_MKEY_MASK_FREE;
4281 
4282 	return cpu_to_be64(result);
4283 }
4284 
4285 static __be64 get_umr_disable_mr_mask(void)
4286 {
4287 	u64 result;
4288 
4289 	result = MLX5_MKEY_MASK_FREE;
4290 
4291 	return cpu_to_be64(result);
4292 }
4293 
4294 static __be64 get_umr_update_translation_mask(void)
4295 {
4296 	u64 result;
4297 
4298 	result = MLX5_MKEY_MASK_LEN |
4299 		 MLX5_MKEY_MASK_PAGE_SIZE |
4300 		 MLX5_MKEY_MASK_START_ADDR;
4301 
4302 	return cpu_to_be64(result);
4303 }
4304 
4305 static __be64 get_umr_update_access_mask(int atomic)
4306 {
4307 	u64 result;
4308 
4309 	result = MLX5_MKEY_MASK_LR |
4310 		 MLX5_MKEY_MASK_LW |
4311 		 MLX5_MKEY_MASK_RR |
4312 		 MLX5_MKEY_MASK_RW;
4313 
4314 	if (atomic)
4315 		result |= MLX5_MKEY_MASK_A;
4316 
4317 	return cpu_to_be64(result);
4318 }
4319 
4320 static __be64 get_umr_update_pd_mask(void)
4321 {
4322 	u64 result;
4323 
4324 	result = MLX5_MKEY_MASK_PD;
4325 
4326 	return cpu_to_be64(result);
4327 }
4328 
4329 static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
4330 {
4331 	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
4332 	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
4333 	    (mask & MLX5_MKEY_MASK_A &&
4334 	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
4335 		return -EPERM;
4336 	return 0;
4337 }
4338 
4339 static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
4340 			       struct mlx5_wqe_umr_ctrl_seg *umr,
4341 			       const struct ib_send_wr *wr, int atomic)
4342 {
4343 	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
4344 
4345 	memset(umr, 0, sizeof(*umr));
4346 
4347 	if (!umrwr->ignore_free_state) {
4348 		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
4349 			 /* fail if free */
4350 			umr->flags = MLX5_UMR_CHECK_FREE;
4351 		else
4352 			/* fail if not free */
4353 			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
4354 	}
4355 
4356 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
4357 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
4358 		u64 offset = get_xlt_octo(umrwr->offset);
4359 
4360 		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
4361 		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
4362 		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
4363 	}
4364 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
4365 		umr->mkey_mask |= get_umr_update_translation_mask();
4366 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
4367 		umr->mkey_mask |= get_umr_update_access_mask(atomic);
4368 		umr->mkey_mask |= get_umr_update_pd_mask();
4369 	}
4370 	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
4371 		umr->mkey_mask |= get_umr_enable_mr_mask();
4372 	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
4373 		umr->mkey_mask |= get_umr_disable_mr_mask();
4374 
4375 	if (!wr->num_sge)
4376 		umr->flags |= MLX5_UMR_INLINE;
4377 
4378 	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
4379 }
4380 
4381 static u8 get_umr_flags(int acc)
4382 {
4383 	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
4384 	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
4385 	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
4386 	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
4387 		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
4388 }
4389 
4390 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
4391 			     struct mlx5_ib_mr *mr,
4392 			     u32 key, int access)
4393 {
4394 	int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
4395 
4396 	memset(seg, 0, sizeof(*seg));
4397 
4398 	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
4399 		seg->log2_page_size = ilog2(mr->ibmr.page_size);
4400 	else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
4401 		/* KLMs take twice the size of MTTs */
4402 		ndescs *= 2;
4403 
4404 	seg->flags = get_umr_flags(access) | mr->access_mode;
4405 	seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
4406 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
4407 	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
4408 	seg->len = cpu_to_be64(mr->ibmr.length);
4409 	seg->xlt_oct_size = cpu_to_be32(ndescs);
4410 }
4411 
4412 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
4413 {
4414 	memset(seg, 0, sizeof(*seg));
4415 	seg->status = MLX5_MKEY_STATUS_FREE;
4416 }
4417 
4418 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
4419 				 const struct ib_send_wr *wr)
4420 {
4421 	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
4422 
4423 	memset(seg, 0, sizeof(*seg));
4424 	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
4425 		seg->status = MLX5_MKEY_STATUS_FREE;
4426 
4427 	seg->flags = convert_access(umrwr->access_flags);
4428 	if (umrwr->pd)
4429 		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
4430 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
4431 	    !umrwr->length)
4432 		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
4433 
4434 	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
4435 	seg->len = cpu_to_be64(umrwr->length);
4436 	seg->log2_page_size = umrwr->page_shift;
4437 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
4438 				       mlx5_mkey_variant(umrwr->mkey));
4439 }
4440 
4441 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
4442 			     struct mlx5_ib_mr *mr,
4443 			     struct mlx5_ib_pd *pd)
4444 {
4445 	int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
4446 
4447 	dseg->addr = cpu_to_be64(mr->desc_map);
4448 	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
4449 	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
4450 }
4451 
4452 static __be32 send_ieth(const struct ib_send_wr *wr)
4453 {
4454 	switch (wr->opcode) {
4455 	case IB_WR_SEND_WITH_IMM:
4456 	case IB_WR_RDMA_WRITE_WITH_IMM:
4457 		return wr->ex.imm_data;
4458 
4459 	case IB_WR_SEND_WITH_INV:
4460 		return cpu_to_be32(wr->ex.invalidate_rkey);
4461 
4462 	default:
4463 		return 0;
4464 	}
4465 }
4466 
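/* XOR all bytes of the WQE region and return the bitwise complement; used to
 * compute the optional WQE signature.
 */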
4467 static u8 calc_sig(void *wqe, int size)
4468 {
4469 	u8 *p = wqe;
4470 	u8 res = 0;
4471 	int i;
4472 
4473 	for (i = 0; i < size; i++)
4474 		res ^= p[i];
4475 
4476 	return ~res;
4477 }
4478 
4479 static u8 wq_sig(void *wqe)
4480 {
4481 	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
4482 }
4483 
4484 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
4485 			    void **wqe, int *wqe_sz, void **cur_edge)
4486 {
4487 	struct mlx5_wqe_inline_seg *seg;
4488 	size_t offset;
4489 	int inl = 0;
4490 	int i;
4491 
4492 	seg = *wqe;
4493 	*wqe += sizeof(*seg);
4494 	offset = sizeof(*seg);
4495 
4496 	for (i = 0; i < wr->num_sge; i++) {
4497 		size_t len  = wr->sg_list[i].length;
4498 		void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
4499 
4500 		inl += len;
4501 
4502 		if (unlikely(inl > qp->max_inline_data))
4503 			return -ENOMEM;
4504 
4505 		while (likely(len)) {
4506 			size_t leftlen;
4507 			size_t copysz;
4508 
4509 			handle_post_send_edge(&qp->sq, wqe,
4510 					      *wqe_sz + (offset >> 4),
4511 					      cur_edge);
4512 
4513 			leftlen = *cur_edge - *wqe;
4514 			copysz = min_t(size_t, leftlen, len);
4515 
4516 			memcpy(*wqe, addr, copysz);
4517 			len -= copysz;
4518 			addr += copysz;
4519 			*wqe += copysz;
4520 			offset += copysz;
4521 		}
4522 	}
4523 
4524 	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
4525 
4526 	*wqe_sz +=  ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
4527 
4528 	return 0;
4529 }
4530 
4531 static u16 prot_field_size(enum ib_signature_type type)
4532 {
4533 	switch (type) {
4534 	case IB_SIG_TYPE_T10_DIF:
4535 		return MLX5_DIF_SIZE;
4536 	default:
4537 		return 0;
4538 	}
4539 }
4540 
4541 static u8 bs_selector(int block_size)
4542 {
4543 	switch (block_size) {
4544 	case 512:	    return 0x1;
4545 	case 520:	    return 0x2;
4546 	case 4096:	    return 0x3;
4547 	case 4160:	    return 0x4;
4548 	case 1073741824:    return 0x5;
4549 	default:	    return 0;
4550 	}
4551 }
4552 
4553 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
4554 			      struct mlx5_bsf_inl *inl)
4555 {
4556 	/* Valid inline section and allow BSF refresh */
4557 	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
4558 				       MLX5_BSF_REFRESH_DIF);
4559 	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
4560 	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
4561 	/* repeating block */
4562 	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
4563 	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
4564 			MLX5_DIF_CRC : MLX5_DIF_IPCS;
4565 
4566 	if (domain->sig.dif.ref_remap)
4567 		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
4568 
4569 	if (domain->sig.dif.app_escape) {
4570 		if (domain->sig.dif.ref_escape)
4571 			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
4572 		else
4573 			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
4574 	}
4575 
4576 	inl->dif_app_bitmask_check =
4577 		cpu_to_be16(domain->sig.dif.apptag_check_mask);
4578 }
4579 
4580 static int mlx5_set_bsf(struct ib_mr *sig_mr,
4581 			struct ib_sig_attrs *sig_attrs,
4582 			struct mlx5_bsf *bsf, u32 data_size)
4583 {
4584 	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
4585 	struct mlx5_bsf_basic *basic = &bsf->basic;
4586 	struct ib_sig_domain *mem = &sig_attrs->mem;
4587 	struct ib_sig_domain *wire = &sig_attrs->wire;
4588 
4589 	memset(bsf, 0, sizeof(*bsf));
4590 
4591 	/* Basic + Extended + Inline */
4592 	basic->bsf_size_sbs = 1 << 7;
4593 	/* Input domain check byte mask */
4594 	basic->check_byte_mask = sig_attrs->check_mask;
4595 	basic->raw_data_size = cpu_to_be32(data_size);
4596 
4597 	/* Memory domain */
4598 	switch (sig_attrs->mem.sig_type) {
4599 	case IB_SIG_TYPE_NONE:
4600 		break;
4601 	case IB_SIG_TYPE_T10_DIF:
4602 		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
4603 		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
4604 		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
4605 		break;
4606 	default:
4607 		return -EINVAL;
4608 	}
4609 
4610 	/* Wire domain */
4611 	switch (sig_attrs->wire.sig_type) {
4612 	case IB_SIG_TYPE_NONE:
4613 		break;
4614 	case IB_SIG_TYPE_T10_DIF:
4615 		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
4616 		    mem->sig_type == wire->sig_type) {
4617 			/* Same block structure */
4618 			basic->bsf_size_sbs |= 1 << 4;
4619 			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
4620 				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
4621 			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
4622 				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
4623 			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
4624 				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
4625 		} else
4626 			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
4627 
4628 		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
4629 		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
4630 		break;
4631 	default:
4632 		return -EINVAL;
4633 	}
4634 
4635 	return 0;
4636 }
4637 
4638 static int set_sig_data_segment(const struct ib_send_wr *send_wr,
4639 				struct ib_mr *sig_mr,
4640 				struct ib_sig_attrs *sig_attrs,
4641 				struct mlx5_ib_qp *qp, void **seg, int *size,
4642 				void **cur_edge)
4643 {
4644 	struct mlx5_bsf *bsf;
4645 	u32 data_len;
4646 	u32 data_key;
4647 	u64 data_va;
4648 	u32 prot_len = 0;
4649 	u32 prot_key = 0;
4650 	u64 prot_va = 0;
4651 	bool prot = false;
4652 	int ret;
4653 	int wqe_size;
4654 	struct mlx5_ib_mr *mr = to_mmr(sig_mr);
4655 	struct mlx5_ib_mr *pi_mr = mr->pi_mr;
4656 
4657 	data_len = pi_mr->data_length;
4658 	data_key = pi_mr->ibmr.lkey;
4659 	data_va = pi_mr->data_iova;
4660 	if (pi_mr->meta_ndescs) {
4661 		prot_len = pi_mr->meta_length;
4662 		prot_key = pi_mr->ibmr.lkey;
4663 		prot_va = pi_mr->pi_iova;
4664 		prot = true;
4665 	}
4666 
4667 	if (!prot || (data_key == prot_key && data_va == prot_va &&
4668 		      data_len == prot_len)) {
4669 		/**
4670 		 * The source domain doesn't contain signature information,
4671 		 * or data and protection are interleaved in memory.
4672 		 * So we need to construct:
4673 		 *                  ------------------
4674 		 *                 |     data_klm     |
4675 		 *                  ------------------
4676 		 *                 |       BSF        |
4677 		 *                  ------------------
4678 		 **/
4679 		struct mlx5_klm *data_klm = *seg;
4680 
4681 		data_klm->bcount = cpu_to_be32(data_len);
4682 		data_klm->key = cpu_to_be32(data_key);
4683 		data_klm->va = cpu_to_be64(data_va);
4684 		wqe_size = ALIGN(sizeof(*data_klm), 64);
4685 	} else {
4686 		/**
4687 		 * The source domain contains signature information,
4688 		 * so we need to construct a strided block format:
4689 		 *               ---------------------------
4690 		 *              |     stride_block_ctrl     |
4691 		 *               ---------------------------
4692 		 *              |          data_klm         |
4693 		 *               ---------------------------
4694 		 *              |          prot_klm         |
4695 		 *               ---------------------------
4696 		 *              |             BSF           |
4697 		 *               ---------------------------
4698 		 **/
4699 		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
4700 		struct mlx5_stride_block_entry *data_sentry;
4701 		struct mlx5_stride_block_entry *prot_sentry;
4702 		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
4703 		int prot_size;
4704 
4705 		sblock_ctrl = *seg;
4706 		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
4707 		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
4708 
4709 		prot_size = prot_field_size(sig_attrs->mem.sig_type);
4710 		if (!prot_size) {
4711 			pr_err("Bad block size given: %u\n", block_size);
4712 			return -EINVAL;
4713 		}
4714 		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
4715 							    prot_size);
4716 		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
4717 		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
4718 		sblock_ctrl->num_entries = cpu_to_be16(2);
4719 
4720 		data_sentry->bcount = cpu_to_be16(block_size);
4721 		data_sentry->key = cpu_to_be32(data_key);
4722 		data_sentry->va = cpu_to_be64(data_va);
4723 		data_sentry->stride = cpu_to_be16(block_size);
4724 
4725 		prot_sentry->bcount = cpu_to_be16(prot_size);
4726 		prot_sentry->key = cpu_to_be32(prot_key);
4727 		prot_sentry->va = cpu_to_be64(prot_va);
4728 		prot_sentry->stride = cpu_to_be16(prot_size);
4729 
4730 		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
4731 				 sizeof(*prot_sentry), 64);
4732 	}
4733 
4734 	*seg += wqe_size;
4735 	*size += wqe_size / 16;
4736 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4737 
4738 	bsf = *seg;
4739 	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
4740 	if (ret)
4741 		return -EINVAL;
4742 
4743 	*seg += sizeof(*bsf);
4744 	*size += sizeof(*bsf) / 16;
4745 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4746 
4747 	return 0;
4748 }
4749 
4750 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
4751 				 struct ib_mr *sig_mr, int access_flags,
4752 				 u32 size, u32 length, u32 pdn)
4753 {
4754 	u32 sig_key = sig_mr->rkey;
4755 	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
4756 
4757 	memset(seg, 0, sizeof(*seg));
4758 
4759 	seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
4760 	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
4761 	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
4762 				    MLX5_MKEY_BSF_EN | pdn);
4763 	seg->len = cpu_to_be64(length);
4764 	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
4765 	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
4766 }
4767 
4768 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
4769 				u32 size)
4770 {
4771 	memset(umr, 0, sizeof(*umr));
4772 
4773 	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
4774 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4775 	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
4776 	umr->mkey_mask = sig_mkey_mask();
4777 }
4778 
4779 static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
4780 			 struct mlx5_ib_qp *qp, void **seg, int *size,
4781 			 void **cur_edge)
4782 {
4783 	const struct ib_reg_wr *wr = reg_wr(send_wr);
4784 	struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
4785 	struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
4786 	struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
4787 	u32 pdn = get_pd(qp)->pdn;
4788 	u32 xlt_size;
4789 	int region_len, ret;
4790 
4791 	if (unlikely(send_wr->num_sge != 0) ||
4792 	    unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
4793 	    unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
4794 	    unlikely(!sig_mr->sig->sig_status_checked))
4795 		return -EINVAL;
4796 
4797 	/* length of the protected region, data + protection */
4798 	region_len = pi_mr->ibmr.length;
4799 
4800 	/**
4801 	 * KLM octoword size - if protection was provided,
4802 	 * then we use the strided block format (3 octowords),
4803 	 * else we use a single KLM (1 octoword).
4804 	 **/
4805 	if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
4806 		xlt_size = 0x30;
4807 	else
4808 		xlt_size = sizeof(struct mlx5_klm);
4809 
4810 	set_sig_umr_segment(*seg, xlt_size);
4811 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4812 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4813 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4814 
4815 	set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
4816 			     pdn);
4817 	*seg += sizeof(struct mlx5_mkey_seg);
4818 	*size += sizeof(struct mlx5_mkey_seg) / 16;
4819 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4820 
4821 	ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
4822 				   cur_edge);
4823 	if (ret)
4824 		return ret;
4825 
4826 	sig_mr->sig->sig_status_checked = false;
4827 	return 0;
4828 }
4829 
4830 static int set_psv_wr(struct ib_sig_domain *domain,
4831 		      u32 psv_idx, void **seg, int *size)
4832 {
4833 	struct mlx5_seg_set_psv *psv_seg = *seg;
4834 
4835 	memset(psv_seg, 0, sizeof(*psv_seg));
4836 	psv_seg->psv_num = cpu_to_be32(psv_idx);
4837 	switch (domain->sig_type) {
4838 	case IB_SIG_TYPE_NONE:
4839 		break;
4840 	case IB_SIG_TYPE_T10_DIF:
4841 		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
4842 						     domain->sig.dif.app_tag);
4843 		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
4844 		break;
4845 	default:
4846 		pr_err("Bad signature type (%d) is given.\n",
4847 		       domain->sig_type);
4848 		return -EINVAL;
4849 	}
4850 
4851 	*seg += sizeof(*psv_seg);
4852 	*size += sizeof(*psv_seg) / 16;
4853 
4854 	return 0;
4855 }
4856 
4857 static int set_reg_wr(struct mlx5_ib_qp *qp,
4858 		      const struct ib_reg_wr *wr,
4859 		      void **seg, int *size, void **cur_edge,
4860 		      bool check_not_free)
4861 {
4862 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
4863 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
4864 	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
4865 	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4866 	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
4867 	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
4868 	u8 flags = 0;
4869 
4870 	if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
4871 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
4872 			     "Fast update of %s for MR is disabled\n",
4873 			     (MLX5_CAP_GEN(dev->mdev,
4874 					   umr_modify_entity_size_disabled)) ?
4875 				     "entity size" :
4876 				     "atomic access");
4877 		return -EINVAL;
4878 	}
4879 
4880 	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
4881 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
4882 			     "Invalid IB_SEND_INLINE send flag\n");
4883 		return -EINVAL;
4884 	}
4885 
4886 	if (check_not_free)
4887 		flags |= MLX5_UMR_CHECK_NOT_FREE;
4888 	if (umr_inline)
4889 		flags |= MLX5_UMR_INLINE;
4890 
4891 	set_reg_umr_seg(*seg, mr, flags, atomic);
4892 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4893 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4894 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4895 
4896 	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
4897 	*seg += sizeof(struct mlx5_mkey_seg);
4898 	*size += sizeof(struct mlx5_mkey_seg) / 16;
4899 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4900 
4901 	if (umr_inline) {
4902 		memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
4903 				mr_list_size);
4904 		*size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
4905 	} else {
4906 		set_reg_data_seg(*seg, mr, pd);
4907 		*seg += sizeof(struct mlx5_wqe_data_seg);
4908 		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
4909 	}
4910 	return 0;
4911 }
4912 
4913 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
4914 			void **cur_edge)
4915 {
4916 	set_linv_umr_seg(*seg);
4917 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4918 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4919 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4920 	set_linv_mkey_seg(*seg);
4921 	*seg += sizeof(struct mlx5_mkey_seg);
4922 	*size += sizeof(struct mlx5_mkey_seg) / 16;
4923 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
4924 }
4925 
4926 static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
4927 {
4928 	__be32 *p = NULL;
4929 	int i, j;
4930 
4931 	pr_debug("dump WQE index %u:\n", idx);
4932 	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
4933 		if ((i & 0xf) == 0) {
4934 			p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
4935 			pr_debug("WQBB at %p:\n", (void *)p);
4936 			j = 0;
4937 			idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
4938 		}
4939 		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
4940 			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
4941 			 be32_to_cpu(p[j + 3]));
4942 	}
4943 }
4944 
4945 static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
4946 		       struct mlx5_wqe_ctrl_seg **ctrl,
4947 		       const struct ib_send_wr *wr, unsigned int *idx,
4948 		       int *size, void **cur_edge, int nreq,
4949 		       bool send_signaled, bool solicited)
4950 {
4951 	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
4952 		return -ENOMEM;
4953 
4954 	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
4955 	*seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
4956 	*ctrl = *seg;
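	/* Clear bytes 8..11 of the control segment (signature, rsvd,
	 * fm_ce_se) before OR-ing in the flags below.
	 */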
4957 	*(uint32_t *)(*seg + 8) = 0;
4958 	(*ctrl)->imm = send_ieth(wr);
4959 	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
4960 		(send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
4961 		(solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
4962 
4963 	*seg += sizeof(**ctrl);
4964 	*size = sizeof(**ctrl) / 16;
4965 	*cur_edge = qp->sq.cur_edge;
4966 
4967 	return 0;
4968 }
4969 
4970 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
4971 		     struct mlx5_wqe_ctrl_seg **ctrl,
4972 		     const struct ib_send_wr *wr, unsigned *idx,
4973 		     int *size, void **cur_edge, int nreq)
4974 {
4975 	return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
4976 			   wr->send_flags & IB_SEND_SIGNALED,
4977 			   wr->send_flags & IB_SEND_SOLICITED);
4978 }
4979 
4980 static void finish_wqe(struct mlx5_ib_qp *qp,
4981 		       struct mlx5_wqe_ctrl_seg *ctrl,
4982 		       void *seg, u8 size, void *cur_edge,
4983 		       unsigned int idx, u64 wr_id, int nreq, u8 fence,
4984 		       u32 mlx5_opcode)
4985 {
4986 	u8 opmod = 0;
4987 
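	/* opmod_idx_opcode: opcode in bits 7:0, the WQE index (cur_post) in
	 * bits 23:8 and opmod in bits 31:24.
	 */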
4988 	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
4989 					     mlx5_opcode | ((u32)opmod << 24));
4990 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
4991 	ctrl->fm_ce_se |= fence;
4992 	if (unlikely(qp->wq_sig))
4993 		ctrl->signature = wq_sig(ctrl);
4994 
4995 	qp->sq.wrid[idx] = wr_id;
4996 	qp->sq.w_list[idx].opcode = mlx5_opcode;
4997 	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
4998 	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
4999 	qp->sq.w_list[idx].next = qp->sq.cur_post;
5000 
5001 	/* Save the edge, which was possibly updated during WQE
5002 	 * construction, into the SQ's cache.
5003 	 */
5004 	seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
5005 	qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
5006 			  get_sq_edge(&qp->sq, qp->sq.cur_post &
5007 				      (qp->sq.wqe_cnt - 1)) :
5008 			  cur_edge;
5009 }
5010 
5011 static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
5012 			      const struct ib_send_wr **bad_wr, bool drain)
5013 {
5014 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
5015 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
5016 	struct mlx5_core_dev *mdev = dev->mdev;
5017 	struct ib_reg_wr reg_pi_wr;
5018 	struct mlx5_ib_qp *qp;
5019 	struct mlx5_ib_mr *mr;
5020 	struct mlx5_ib_mr *pi_mr;
5021 	struct mlx5_ib_mr pa_pi_mr;
5022 	struct ib_sig_attrs *sig_attrs;
5023 	struct mlx5_wqe_xrc_seg *xrc;
5024 	struct mlx5_bf *bf;
5025 	void *cur_edge;
5026 	int uninitialized_var(size);
5027 	unsigned long flags;
5028 	unsigned idx;
5029 	int err = 0;
5030 	int num_sge;
5031 	void *seg;
5032 	int nreq;
5033 	int i;
5034 	u8 next_fence = 0;
5035 	u8 fence;
5036 
5037 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
5038 		     !drain)) {
5039 		*bad_wr = wr;
5040 		return -EIO;
5041 	}
5042 
5043 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
5044 		return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
5045 
5046 	qp = to_mqp(ibqp);
5047 	bf = &qp->bf;
5048 
5049 	spin_lock_irqsave(&qp->sq.lock, flags);
5050 
5051 	for (nreq = 0; wr; nreq++, wr = wr->next) {
5052 		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
5053 			mlx5_ib_warn(dev, "\n");
5054 			err = -EINVAL;
5055 			*bad_wr = wr;
5056 			goto out;
5057 		}
5058 
5059 		num_sge = wr->num_sge;
5060 		if (unlikely(num_sge > qp->sq.max_gs)) {
5061 			mlx5_ib_warn(dev, "\n");
5062 			err = -EINVAL;
5063 			*bad_wr = wr;
5064 			goto out;
5065 		}
5066 
5067 		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
5068 				nreq);
5069 		if (err) {
5070 			mlx5_ib_warn(dev, "\n");
5071 			err = -ENOMEM;
5072 			*bad_wr = wr;
5073 			goto out;
5074 		}
5075 
5076 		if (wr->opcode == IB_WR_REG_MR ||
5077 		    wr->opcode == IB_WR_REG_MR_INTEGRITY) {
5078 			fence = dev->umr_fence;
5079 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
5080 		} else  {
5081 			if (wr->send_flags & IB_SEND_FENCE) {
5082 				if (qp->next_fence)
5083 					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
5084 				else
5085 					fence = MLX5_FENCE_MODE_FENCE;
5086 			} else {
5087 				fence = qp->next_fence;
5088 			}
5089 		}
5090 
5091 		switch (ibqp->qp_type) {
5092 		case IB_QPT_XRC_INI:
5093 			xrc = seg;
5094 			seg += sizeof(*xrc);
5095 			size += sizeof(*xrc) / 16;
5096 			/* fall through */
5097 		case IB_QPT_RC:
5098 			switch (wr->opcode) {
5099 			case IB_WR_RDMA_READ:
5100 			case IB_WR_RDMA_WRITE:
5101 			case IB_WR_RDMA_WRITE_WITH_IMM:
5102 				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
5103 					      rdma_wr(wr)->rkey);
5104 				seg += sizeof(struct mlx5_wqe_raddr_seg);
5105 				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
5106 				break;
5107 
5108 			case IB_WR_ATOMIC_CMP_AND_SWP:
5109 			case IB_WR_ATOMIC_FETCH_AND_ADD:
5110 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
5111 				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
5112 				err = -ENOSYS;
5113 				*bad_wr = wr;
5114 				goto out;
5115 
5116 			case IB_WR_LOCAL_INV:
5117 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
5118 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
5119 				set_linv_wr(qp, &seg, &size, &cur_edge);
5120 				num_sge = 0;
5121 				break;
5122 
5123 			case IB_WR_REG_MR:
5124 				qp->sq.wr_data[idx] = IB_WR_REG_MR;
5125 				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
5126 				err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
5127 						 &cur_edge, true);
5128 				if (err) {
5129 					*bad_wr = wr;
5130 					goto out;
5131 				}
5132 				num_sge = 0;
5133 				break;
5134 
5135 			case IB_WR_REG_MR_INTEGRITY:
5136 				qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY;
5137 
5138 				mr = to_mmr(reg_wr(wr)->mr);
5139 				pi_mr = mr->pi_mr;
5140 
5141 				if (pi_mr) {
5142 					memset(&reg_pi_wr, 0,
5143 					       sizeof(struct ib_reg_wr));
5144 
5145 					reg_pi_wr.mr = &pi_mr->ibmr;
5146 					reg_pi_wr.access = reg_wr(wr)->access;
5147 					reg_pi_wr.key = pi_mr->ibmr.rkey;
5148 
5149 					ctrl->imm = cpu_to_be32(reg_pi_wr.key);
5150 					/* UMR for data + prot registration */
5151 					err = set_reg_wr(qp, &reg_pi_wr, &seg,
5152 							 &size, &cur_edge,
5153 							 false);
5154 					if (err) {
5155 						*bad_wr = wr;
5156 						goto out;
5157 					}
5158 					finish_wqe(qp, ctrl, seg, size,
5159 						   cur_edge, idx, wr->wr_id,
5160 						   nreq, fence,
5161 						   MLX5_OPCODE_UMR);
5162 
5163 					err = begin_wqe(qp, &seg, &ctrl, wr,
5164 							&idx, &size, &cur_edge,
5165 							nreq);
5166 					if (err) {
5167 						mlx5_ib_warn(dev, "\n");
5168 						err = -ENOMEM;
5169 						*bad_wr = wr;
5170 						goto out;
5171 					}
5172 				} else {
5173 					memset(&pa_pi_mr, 0,
5174 					       sizeof(struct mlx5_ib_mr));
5175 					/* No UMR, use local_dma_lkey */
5176 					pa_pi_mr.ibmr.lkey =
5177 						mr->ibmr.pd->local_dma_lkey;
5178 
5179 					pa_pi_mr.ndescs = mr->ndescs;
5180 					pa_pi_mr.data_length = mr->data_length;
5181 					pa_pi_mr.data_iova = mr->data_iova;
5182 					if (mr->meta_ndescs) {
5183 						pa_pi_mr.meta_ndescs =
5184 							mr->meta_ndescs;
5185 						pa_pi_mr.meta_length =
5186 							mr->meta_length;
5187 						pa_pi_mr.pi_iova = mr->pi_iova;
5188 					}
5189 
5190 					pa_pi_mr.ibmr.length = mr->ibmr.length;
5191 					mr->pi_mr = &pa_pi_mr;
5192 				}
5193 				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
5194 				/* UMR for sig MR */
5195 				err = set_pi_umr_wr(wr, qp, &seg, &size,
5196 						    &cur_edge);
5197 				if (err) {
5198 					mlx5_ib_warn(dev, "\n");
5199 					*bad_wr = wr;
5200 					goto out;
5201 				}
5202 				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
5203 					   wr->wr_id, nreq, fence,
5204 					   MLX5_OPCODE_UMR);
5205 
5206 				/*
5207 				 * SET_PSV WQEs are not signaled and are
5208 				 * solicited on error
5209 				 */
5210 				sig_attrs = mr->ibmr.sig_attrs;
5211 				err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
5212 						  &size, &cur_edge, nreq, false,
5213 						  true);
5214 				if (err) {
5215 					mlx5_ib_warn(dev, "\n");
5216 					err = -ENOMEM;
5217 					*bad_wr = wr;
5218 					goto out;
5219 				}
5220 				err = set_psv_wr(&sig_attrs->mem,
5221 						 mr->sig->psv_memory.psv_idx,
5222 						 &seg, &size);
5223 				if (err) {
5224 					mlx5_ib_warn(dev, "\n");
5225 					*bad_wr = wr;
5226 					goto out;
5227 				}
5228 				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
5229 					   wr->wr_id, nreq, next_fence,
5230 					   MLX5_OPCODE_SET_PSV);
5231 
5232 				err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
5233 						  &size, &cur_edge, nreq, false,
5234 						  true);
5235 				if (err) {
5236 					mlx5_ib_warn(dev, "\n");
5237 					err = -ENOMEM;
5238 					*bad_wr = wr;
5239 					goto out;
5240 				}
5241 				err = set_psv_wr(&sig_attrs->wire,
5242 						 mr->sig->psv_wire.psv_idx,
5243 						 &seg, &size);
5244 				if (err) {
5245 					mlx5_ib_warn(dev, "\n");
5246 					*bad_wr = wr;
5247 					goto out;
5248 				}
5249 				finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
5250 					   wr->wr_id, nreq, next_fence,
5251 					   MLX5_OPCODE_SET_PSV);
5252 
5253 				qp->next_fence =
5254 					MLX5_FENCE_MODE_INITIATOR_SMALL;
5255 				num_sge = 0;
5256 				goto skip_psv;
5257 
5258 			default:
5259 				break;
5260 			}
5261 			break;
5262 
5263 		case IB_QPT_UC:
5264 			switch (wr->opcode) {
5265 			case IB_WR_RDMA_WRITE:
5266 			case IB_WR_RDMA_WRITE_WITH_IMM:
5267 				set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
5268 					      rdma_wr(wr)->rkey);
5269 				seg  += sizeof(struct mlx5_wqe_raddr_seg);
5270 				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
5271 				break;
5272 
5273 			default:
5274 				break;
5275 			}
5276 			break;
5277 
5278 		case IB_QPT_SMI:
5279 			if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
5280 				mlx5_ib_warn(dev, "Sending SMP MADs is not allowed\n");
5281 				err = -EPERM;
5282 				*bad_wr = wr;
5283 				goto out;
5284 			}
5285 			/* fall through */
5286 		case MLX5_IB_QPT_HW_GSI:
5287 			set_datagram_seg(seg, wr);
5288 			seg += sizeof(struct mlx5_wqe_datagram_seg);
5289 			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
5290 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5291 
5292 			break;
5293 		case IB_QPT_UD:
5294 			set_datagram_seg(seg, wr);
5295 			seg += sizeof(struct mlx5_wqe_datagram_seg);
5296 			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
5297 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5298 
5299 			/* handle QPs that support UD LSO offload */
5300 			if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
5301 				struct mlx5_wqe_eth_pad *pad;
5302 
5303 				pad = seg;
5304 				memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
5305 				seg += sizeof(struct mlx5_wqe_eth_pad);
5306 				size += sizeof(struct mlx5_wqe_eth_pad) / 16;
5307 				set_eth_seg(wr, qp, &seg, &size, &cur_edge);
5308 				handle_post_send_edge(&qp->sq, &seg, size,
5309 						      &cur_edge);
5310 			}
5311 			break;
5312 		case MLX5_IB_QPT_REG_UMR:
5313 			if (wr->opcode != MLX5_IB_WR_UMR) {
5314 				err = -EINVAL;
5315 				mlx5_ib_warn(dev, "bad opcode\n");
5316 				goto out;
5317 			}
5318 			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
5319 			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
5320 			err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
5321 			if (unlikely(err))
5322 				goto out;
5323 			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
5324 			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
5325 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5326 			set_reg_mkey_segment(seg, wr);
5327 			seg += sizeof(struct mlx5_mkey_seg);
5328 			size += sizeof(struct mlx5_mkey_seg) / 16;
5329 			handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
5330 			break;
5331 
5332 		default:
5333 			break;
5334 		}
5335 
5336 		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
5337 			err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
5338 			if (unlikely(err)) {
5339 				mlx5_ib_warn(dev, "\n");
5340 				*bad_wr = wr;
5341 				goto out;
5342 			}
5343 		} else {
5344 			for (i = 0; i < num_sge; i++) {
5345 				handle_post_send_edge(&qp->sq, &seg, size,
5346 						      &cur_edge);
5347 				if (likely(wr->sg_list[i].length)) {
5348 					set_data_ptr_seg(
5349 					    (struct mlx5_wqe_data_seg *)seg,
5350 					    wr->sg_list + i);
5351 					size += sizeof(struct mlx5_wqe_data_seg) / 16;
5352 					seg += sizeof(struct mlx5_wqe_data_seg);
5353 				}
5354 			}
5355 		}
5356 
5357 		qp->next_fence = next_fence;
5358 		finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
5359 			   fence, mlx5_ib_opcode[wr->opcode]);
5360 skip_psv:
5361 		if (0)
5362 			dump_wqe(qp, idx, size);
5363 	}
5364 
5365 out:
5366 	if (likely(nreq)) {
5367 		qp->sq.head += nreq;
5368 
5369 		/* Make sure that descriptors are written before
5370 		 * updating doorbell record and ringing the doorbell
5371 		 */
5372 		wmb();
5373 
5374 		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
5375 
5376 		/* Make sure doorbell record is visible to the HCA before
5377 		 * we hit doorbell */
5378 		wmb();
5379 
5380 		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
5381 		/* Make sure doorbells don't leak out of SQ spinlock
5382 		 * and reach the HCA out of order.
5383 		 */
5384 		bf->offset ^= bf->buf_size;
5385 	}
5386 
5387 	spin_unlock_irqrestore(&qp->sq.lock, flags);
5388 
5389 	return err;
5390 }
5391 
5392 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
5393 		      const struct ib_send_wr **bad_wr)
5394 {
5395 	return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
5396 }
5397 
5398 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
5399 {
5400 	sig->signature = calc_sig(sig, size);
5401 }
5402 
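/*
 * Post a chain of receive WRs to the QP's receive queue.  The RQ lock is
 * taken here; on failure *bad_wr points at the first WR that could not be
 * posted, and the WRs already posted before it are still published via the
 * doorbell record on the way out.
 */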
5403 static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
5404 		      const struct ib_recv_wr **bad_wr, bool drain)
5405 {
5406 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
5407 	struct mlx5_wqe_data_seg *scat;
5408 	struct mlx5_rwqe_sig *sig;
5409 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
5410 	struct mlx5_core_dev *mdev = dev->mdev;
5411 	unsigned long flags;
5412 	int err = 0;
5413 	int nreq;
5414 	int ind;
5415 	int i;
5416 
5417 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
5418 		     !drain)) {
5419 		*bad_wr = wr;
5420 		return -EIO;
5421 	}
5422 
5423 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
5424 		return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
5425 
5426 	spin_lock_irqsave(&qp->rq.lock, flags);
5427 
5428 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
5429 
5430 	for (nreq = 0; wr; nreq++, wr = wr->next) {
5431 		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
5432 			err = -ENOMEM;
5433 			*bad_wr = wr;
5434 			goto out;
5435 		}
5436 
5437 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
5438 			err = -EINVAL;
5439 			*bad_wr = wr;
5440 			goto out;
5441 		}
5442 
5443 		scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
5444 		if (qp->wq_sig)
5445 			scat++;
5446 
5447 		for (i = 0; i < wr->num_sge; i++)
5448 			set_data_ptr_seg(scat + i, wr->sg_list + i);
5449 
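		/*
		 * If the WR uses fewer SGEs than max_gs, terminate the
		 * scatter list with a zero-length entry carrying the
		 * invalid lkey so the HCA stops scattering there.
		 */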
5450 		if (i < qp->rq.max_gs) {
5451 			scat[i].byte_count = 0;
5452 			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
5453 			scat[i].addr       = 0;
5454 		}
5455 
5456 		if (qp->wq_sig) {
5457 			sig = (struct mlx5_rwqe_sig *)scat;
5458 			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
5459 		}
5460 
5461 		qp->rq.wrid[ind] = wr->wr_id;
5462 
5463 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
5464 	}
5465 
5466 out:
5467 	if (likely(nreq)) {
5468 		qp->rq.head += nreq;
5469 
5470 		/* Make sure that descriptors are written before
5471 		 * doorbell record.
5472 		 */
5473 		wmb();
5474 
5475 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
5476 	}
5477 
5478 	spin_unlock_irqrestore(&qp->rq.lock, flags);
5479 
5480 	return err;
5481 }
5482 
5483 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
5484 		      const struct ib_recv_wr **bad_wr)
5485 {
5486 	return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
5487 }
5488 
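/* Translate the QP state reported by the device into the IB verbs enum. */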
5489 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
5490 {
5491 	switch (mlx5_state) {
5492 	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
5493 	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
5494 	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
5495 	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
5496 	case MLX5_QP_STATE_SQ_DRAINING:
5497 	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
5498 	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
5499 	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
5500 	default:		     return -1;
5501 	}
5502 }
5503 
5504 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
5505 {
5506 	switch (mlx5_mig_state) {
5507 	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
5508 	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
5509 	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
5510 	default: return -1;
5511 	}
5512 }
5513 
5514 static int to_ib_qp_access_flags(int mlx5_flags)
5515 {
5516 	int ib_flags = 0;
5517 
5518 	if (mlx5_flags & MLX5_QP_BIT_RRE)
5519 		ib_flags |= IB_ACCESS_REMOTE_READ;
5520 	if (mlx5_flags & MLX5_QP_BIT_RWE)
5521 		ib_flags |= IB_ACCESS_REMOTE_WRITE;
5522 	if (mlx5_flags & MLX5_QP_BIT_RAE)
5523 		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
5524 
5525 	return ib_flags;
5526 }
5527 
5528 static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
5529 			    struct rdma_ah_attr *ah_attr,
5530 			    struct mlx5_qp_path *path)
5531 {
5532 
5533 	memset(ah_attr, 0, sizeof(*ah_attr));
5534 
5535 	if (!path->port || path->port > ibdev->num_ports)
5536 		return;
5537 
5538 	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
5539 
5540 	rdma_ah_set_port_num(ah_attr, path->port);
5541 	rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
5542 
5543 	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
5544 	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
5545 	rdma_ah_set_static_rate(ah_attr,
5546 				path->static_rate ? path->static_rate - 5 : 0);
5547 	if (path->grh_mlid & (1 << 7)) {
5548 		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
5549 
5550 		rdma_ah_set_grh(ah_attr, NULL,
5551 				tc_fl & 0xfffff,
5552 				path->mgid_index,
5553 				path->hop_limit,
5554 				(tc_fl >> 20) & 0xff);
5555 		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
5556 	}
5557 }
5558 
5559 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
5560 					struct mlx5_ib_sq *sq,
5561 					u8 *sq_state)
5562 {
5563 	int err;
5564 
5565 	err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
5566 	if (err)
5567 		goto out;
5568 	sq->state = *sq_state;
5569 
5570 out:
5571 	return err;
5572 }
5573 
5574 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
5575 					struct mlx5_ib_rq *rq,
5576 					u8 *rq_state)
5577 {
5578 	void *out;
5579 	void *rqc;
5580 	int inlen;
5581 	int err;
5582 
5583 	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
5584 	out = kvzalloc(inlen, GFP_KERNEL);
5585 	if (!out)
5586 		return -ENOMEM;
5587 
5588 	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
5589 	if (err)
5590 		goto out;
5591 
5592 	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
5593 	*rq_state = MLX5_GET(rqc, rqc, state);
5594 	rq->state = *rq_state;
5595 
5596 out:
5597 	kvfree(out);
5598 	return err;
5599 }
5600 
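/*
 * For Raw Packet QPs the QP state is derived from the combination of the
 * underlying SQ and RQ states.  MLX5_QP_STATE means "keep the currently
 * cached qp->state"; MLX5_QP_STATE_BAD marks a combination that should
 * never be observed.
 */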
5601 static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
5602 				  struct mlx5_ib_qp *qp, u8 *qp_state)
5603 {
5604 	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
5605 		[MLX5_RQC_STATE_RST] = {
5606 			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
5607 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
5608 			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
5609 			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
5610 		},
5611 		[MLX5_RQC_STATE_RDY] = {
5612 			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE_BAD,
5613 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
5614 			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
5615 			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
5616 		},
5617 		[MLX5_RQC_STATE_ERR] = {
5618 			[MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
5619 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
5620 			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
5621 			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
5622 		},
5623 		[MLX5_RQ_STATE_NA] = {
5624 			[MLX5_SQC_STATE_RST]    = IB_QPS_RESET,
5625 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
5626 			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
5627 			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
5628 		},
5629 	};
5630 
5631 	*qp_state = sqrq_trans[rq_state][sq_state];
5632 
5633 	if (*qp_state == MLX5_QP_STATE_BAD) {
5634 		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
5635 		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
5636 		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
5637 		return -EINVAL;
5638 	}
5639 
5640 	if (*qp_state == MLX5_QP_STATE)
5641 		*qp_state = qp->state;
5642 
5643 	return 0;
5644 }
5645 
5646 static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
5647 				     struct mlx5_ib_qp *qp,
5648 				     u8 *raw_packet_qp_state)
5649 {
5650 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
5651 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
5652 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
5653 	int err;
5654 	u8 sq_state = MLX5_SQ_STATE_NA;
5655 	u8 rq_state = MLX5_RQ_STATE_NA;
5656 
5657 	if (qp->sq.wqe_cnt) {
5658 		err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
5659 		if (err)
5660 			return err;
5661 	}
5662 
5663 	if (qp->rq.wqe_cnt) {
5664 		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
5665 		if (err)
5666 			return err;
5667 	}
5668 
5669 	return sqrq_state_to_qp_state(sq_state, rq_state, qp,
5670 				      raw_packet_qp_state);
5671 }
5672 
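/* Query the QP context from firmware and translate it into ib_qp_attr. */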
5673 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
5674 			 struct ib_qp_attr *qp_attr)
5675 {
5676 	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
5677 	struct mlx5_qp_context *context;
5678 	int mlx5_state;
5679 	u32 *outb;
5680 	int err = 0;
5681 
5682 	outb = kzalloc(outlen, GFP_KERNEL);
5683 	if (!outb)
5684 		return -ENOMEM;
5685 
5686 	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
5687 				 outlen);
5688 	if (err)
5689 		goto out;
5690 
5691 	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
5692 	context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
5693 
5694 	mlx5_state = be32_to_cpu(context->flags) >> 28;
5695 
5696 	qp->state		     = to_ib_qp_state(mlx5_state);
5697 	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
5698 	qp_attr->path_mig_state	     =
5699 		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
5700 	qp_attr->qkey		     = be32_to_cpu(context->qkey);
5701 	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
5702 	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
5703 	qp_attr->dest_qp_num	     = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
5704 	qp_attr->qp_access_flags     =
5705 		to_ib_qp_access_flags(be32_to_cpu(context->params2));
5706 
5707 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
5708 		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
5709 		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
5710 		qp_attr->alt_pkey_index =
5711 			be16_to_cpu(context->alt_path.pkey_index);
5712 		qp_attr->alt_port_num	=
5713 			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
5714 	}
5715 
5716 	qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
5717 	qp_attr->port_num = context->pri_path.port;
5718 
5719 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
5720 	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
5721 
5722 	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
5723 
5724 	qp_attr->max_dest_rd_atomic =
5725 		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
5726 	qp_attr->min_rnr_timer	    =
5727 		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
5728 	qp_attr->timeout	    = context->pri_path.ackto_lt >> 3;
5729 	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
5730 	qp_attr->rnr_retry	    = (be32_to_cpu(context->params1) >> 13) & 0x7;
5731 	qp_attr->alt_timeout	    = context->alt_path.ackto_lt >> 3;
5732 
5733 out:
5734 	kfree(outb);
5735 	return err;
5736 }
5737 
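/*
 * DCT QPs can only be queried while in RTR; only the attributes in
 * supported_mask below may be requested, and each requested attribute is
 * filled in from the queried DCT context.
 */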
5738 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
5739 				struct ib_qp_attr *qp_attr, int qp_attr_mask,
5740 				struct ib_qp_init_attr *qp_init_attr)
5741 {
5742 	struct mlx5_core_dct	*dct = &mqp->dct.mdct;
5743 	u32 *out;
5744 	u32 access_flags = 0;
5745 	int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
5746 	void *dctc;
5747 	int err;
5748 	int supported_mask = IB_QP_STATE |
5749 			     IB_QP_ACCESS_FLAGS |
5750 			     IB_QP_PORT |
5751 			     IB_QP_MIN_RNR_TIMER |
5752 			     IB_QP_AV |
5753 			     IB_QP_PATH_MTU |
5754 			     IB_QP_PKEY_INDEX;
5755 
5756 	if (qp_attr_mask & ~supported_mask)
5757 		return -EINVAL;
5758 	if (mqp->state != IB_QPS_RTR)
5759 		return -EINVAL;
5760 
5761 	out = kzalloc(outlen, GFP_KERNEL);
5762 	if (!out)
5763 		return -ENOMEM;
5764 
5765 	err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
5766 	if (err)
5767 		goto out;
5768 
5769 	dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
5770 
5771 	if (qp_attr_mask & IB_QP_STATE)
5772 		qp_attr->qp_state = IB_QPS_RTR;
5773 
5774 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
5775 		if (MLX5_GET(dctc, dctc, rre))
5776 			access_flags |= IB_ACCESS_REMOTE_READ;
5777 		if (MLX5_GET(dctc, dctc, rwe))
5778 			access_flags |= IB_ACCESS_REMOTE_WRITE;
5779 		if (MLX5_GET(dctc, dctc, rae))
5780 			access_flags |= IB_ACCESS_REMOTE_ATOMIC;
5781 		qp_attr->qp_access_flags = access_flags;
5782 	}
5783 
5784 	if (qp_attr_mask & IB_QP_PORT)
5785 		qp_attr->port_num = MLX5_GET(dctc, dctc, port);
5786 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
5787 		qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
5788 	if (qp_attr_mask & IB_QP_AV) {
5789 		qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
5790 		qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
5791 		qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
5792 		qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
5793 	}
5794 	if (qp_attr_mask & IB_QP_PATH_MTU)
5795 		qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
5796 	if (qp_attr_mask & IB_QP_PKEY_INDEX)
5797 		qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
5798 out:
5799 	kfree(out);
5800 	return err;
5801 }
5802 
5803 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5804 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
5805 {
5806 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
5807 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
5808 	int err = 0;
5809 	u8 raw_packet_qp_state;
5810 
5811 	if (ibqp->rwq_ind_tbl)
5812 		return -ENOSYS;
5813 
5814 	if (unlikely(ibqp->qp_type == IB_QPT_GSI))
5815 		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
5816 					    qp_init_attr);
5817 
5818 	/* Not all of the output fields are applicable; make sure to zero them */
5819 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5820 	memset(qp_attr, 0, sizeof(*qp_attr));
5821 
5822 	if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
5823 		return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
5824 					    qp_attr_mask, qp_init_attr);
5825 
5826 	mutex_lock(&qp->mutex);
5827 
5828 	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
5829 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
5830 		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
5831 		if (err)
5832 			goto out;
5833 		qp->state = raw_packet_qp_state;
5834 		qp_attr->port_num = 1;
5835 	} else {
5836 		err = query_qp_attr(dev, qp, qp_attr);
5837 		if (err)
5838 			goto out;
5839 	}
5840 
5841 	qp_attr->qp_state	     = qp->state;
5842 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
5843 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
5844 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
5845 
5846 	if (!ibqp->uobject) {
5847 		qp_attr->cap.max_send_wr  = qp->sq.max_post;
5848 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
5849 		qp_init_attr->qp_context = ibqp->qp_context;
5850 	} else {
5851 		qp_attr->cap.max_send_wr  = 0;
5852 		qp_attr->cap.max_send_sge = 0;
5853 	}
5854 
5855 	qp_init_attr->qp_type = ibqp->qp_type;
5856 	qp_init_attr->recv_cq = ibqp->recv_cq;
5857 	qp_init_attr->send_cq = ibqp->send_cq;
5858 	qp_init_attr->srq = ibqp->srq;
5859 	qp_attr->cap.max_inline_data = qp->max_inline_data;
5860 
5861 	qp_init_attr->cap	     = qp_attr->cap;
5862 
5863 	qp_init_attr->create_flags = 0;
5864 	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
5865 		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
5866 
5867 	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
5868 		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
5869 	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
5870 		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
5871 	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
5872 		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
5873 	if (qp->flags & MLX5_IB_QP_SQPN_QP1)
5874 		qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
5875 
5876 	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
5877 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
5878 
5879 out:
5880 	mutex_unlock(&qp->mutex);
5881 	return err;
5882 }
5883 
5884 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
5885 				   struct ib_udata *udata)
5886 {
5887 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
5888 	struct mlx5_ib_xrcd *xrcd;
5889 	int err;
5890 
5891 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
5892 		return ERR_PTR(-ENOSYS);
5893 
5894 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
5895 	if (!xrcd)
5896 		return ERR_PTR(-ENOMEM);
5897 
5898 	err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
5899 	if (err) {
5900 		kfree(xrcd);
5901 		return ERR_PTR(err);
5902 	}
5903 
5904 	return &xrcd->ibxrcd;
5905 }
5906 
5907 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
5908 {
5909 	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
5910 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
5911 	int err;
5912 
5913 	err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
5914 	if (err)
5915 		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
5916 
5917 	kfree(xrcd);
5918 	return 0;
5919 }
5920 
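/*
 * Dispatch asynchronous WQ events (currently only catastrophic errors) to
 * the event handler registered on the ib_wq, if any.
 */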
5921 static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
5922 {
5923 	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
5924 	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
5925 	struct ib_event event;
5926 
5927 	if (rwq->ibwq.event_handler) {
5928 		event.device     = rwq->ibwq.device;
5929 		event.element.wq = &rwq->ibwq;
5930 		switch (type) {
5931 		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
5932 			event.event = IB_EVENT_WQ_FATAL;
5933 			break;
5934 		default:
5935 			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
5936 			return;
5937 		}
5938 
5939 		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
5940 	}
5941 }
5942 
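/*
 * Enable the device-wide delay-drop timeout the first time an RQ requests
 * it, and count how many RQs are using the feature.
 */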
5943 static int set_delay_drop(struct mlx5_ib_dev *dev)
5944 {
5945 	int err = 0;
5946 
5947 	mutex_lock(&dev->delay_drop.lock);
5948 	if (dev->delay_drop.activate)
5949 		goto out;
5950 
5951 	err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
5952 	if (err)
5953 		goto out;
5954 
5955 	dev->delay_drop.activate = true;
5956 out:
5957 	mutex_unlock(&dev->delay_drop.lock);
5958 
5959 	if (!err)
5960 		atomic_inc(&dev->delay_drop.rqs_cnt);
5961 	return err;
5962 }
5963 
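/*
 * Build the CREATE_RQ command for a user WQ: fill in the RQ and WQ
 * contexts, validate the requested offloads against device capabilities
 * and hand the PAS list of the user buffer to firmware.
 */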
5964 static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
5965 		      struct ib_wq_init_attr *init_attr)
5966 {
5967 	struct mlx5_ib_dev *dev;
5968 	int has_net_offloads;
5969 	__be64 *rq_pas0;
5970 	void *in;
5971 	void *rqc;
5972 	void *wq;
5973 	int inlen;
5974 	int err;
5975 
5976 	dev = to_mdev(pd->device);
5977 
5978 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
5979 	in = kvzalloc(inlen, GFP_KERNEL);
5980 	if (!in)
5981 		return -ENOMEM;
5982 
5983 	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
5984 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
5985 	MLX5_SET(rqc,  rqc, mem_rq_type,
5986 		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
5987 	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
5988 	MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
5989 	MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
5990 	MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
5991 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
5992 	MLX5_SET(wq, wq, wq_type,
5993 		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
5994 		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
5995 	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
5996 		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
5997 			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
5998 			err = -EOPNOTSUPP;
5999 			goto out;
6000 		} else {
6001 			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
6002 		}
6003 	}
6004 	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
6005 	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
6006 		/*
6007 		 * In firmware, the number of strides in each WQE is
6008 		 *   "512 * 2^single_wqe_log_num_of_strides".
6009 		 * Input log values 3 to 8 are encoded as 10 to 15, and
6010 		 * input log values 9 to 18 are encoded as 0 to 9.
6011 		 */
6012 		static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
6013 					     2,  3,  4,  5,  6,  7,  8, 9 };
6014 		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
6015 		MLX5_SET(wq, wq, log_wqe_stride_size,
6016 			 rwq->single_stride_log_num_of_bytes -
6017 			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
6018 		MLX5_SET(wq, wq, log_wqe_num_of_strides,
6019 			 fw_map[rwq->log_num_strides -
6020 				MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
6021 	}
6022 	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
6023 	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
6024 	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
6025 	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
6026 	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
6027 	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
6028 	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
6029 	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
6030 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
6031 			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
6032 			err = -EOPNOTSUPP;
6033 			goto out;
6034 		}
6035 	} else {
6036 		MLX5_SET(rqc, rqc, vsd, 1);
6037 	}
6038 	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
6039 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
6040 			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
6041 			err = -EOPNOTSUPP;
6042 			goto out;
6043 		}
6044 		MLX5_SET(rqc, rqc, scatter_fcs, 1);
6045 	}
6046 	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
6047 		if (!(dev->ib_dev.attrs.raw_packet_caps &
6048 		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
6049 			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
6050 			err = -EOPNOTSUPP;
6051 			goto out;
6052 		}
6053 		MLX5_SET(rqc, rqc, delay_drop_en, 1);
6054 	}
6055 	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
6056 	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
6057 	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
6058 	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
6059 		err = set_delay_drop(dev);
6060 		if (err) {
6061 			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
6062 				     err);
6063 			mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
6064 		} else {
6065 			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
6066 		}
6067 	}
6068 out:
6069 	kvfree(in);
6070 	return err;
6071 }
6072 
6073 static int set_user_rq_size(struct mlx5_ib_dev *dev,
6074 			    struct ib_wq_init_attr *wq_init_attr,
6075 			    struct mlx5_ib_create_wq *ucmd,
6076 			    struct mlx5_ib_rwq *rwq)
6077 {
6078 	/* Sanity check RQ size before proceeding */
6079 	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
6080 		return -EINVAL;
6081 
6082 	if (!ucmd->rq_wqe_count)
6083 		return -EINVAL;
6084 
6085 	rwq->wqe_count = ucmd->rq_wqe_count;
6086 	rwq->wqe_shift = ucmd->rq_wqe_shift;
6087 	if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
6088 		return -EINVAL;
6089 
6090 	rwq->log_rq_stride = rwq->wqe_shift;
6091 	rwq->log_rq_size = ilog2(rwq->wqe_count);
6092 	return 0;
6093 }
6094 
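/*
 * Log-num-strides values below MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES (the
 * extended range) are only valid when the device reports the
 * ext_stride_num_range capability.
 */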
6095 static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
6096 {
6097 	if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
6098 	    (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
6099 		return false;
6100 
6101 	if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
6102 	    (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
6103 		return false;
6104 
6105 	return true;
6106 }
6107 
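/*
 * Parse and validate the user's create_wq command, including the optional
 * striding-RQ attributes, then size the RQ and set up the user receive
 * buffer.
 */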
6108 static int prepare_user_rq(struct ib_pd *pd,
6109 			   struct ib_wq_init_attr *init_attr,
6110 			   struct ib_udata *udata,
6111 			   struct mlx5_ib_rwq *rwq)
6112 {
6113 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
6114 	struct mlx5_ib_create_wq ucmd = {};
6115 	int err;
6116 	size_t required_cmd_sz;
6117 
6118 	required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
6119 		+ sizeof(ucmd.single_stride_log_num_of_bytes);
6120 	if (udata->inlen < required_cmd_sz) {
6121 		mlx5_ib_dbg(dev, "invalid inlen\n");
6122 		return -EINVAL;
6123 	}
6124 
6125 	if (udata->inlen > sizeof(ucmd) &&
6126 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
6127 				 udata->inlen - sizeof(ucmd))) {
6128 		mlx5_ib_dbg(dev, "inlen is not supported\n");
6129 		return -EOPNOTSUPP;
6130 	}
6131 
6132 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
6133 		mlx5_ib_dbg(dev, "copy failed\n");
6134 		return -EFAULT;
6135 	}
6136 
6137 	if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
6138 		mlx5_ib_dbg(dev, "invalid comp mask\n");
6139 		return -EOPNOTSUPP;
6140 	} else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
6141 		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
6142 			mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
6143 			return -EOPNOTSUPP;
6144 		}
6145 		if ((ucmd.single_stride_log_num_of_bytes <
6146 		    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
6147 		    (ucmd.single_stride_log_num_of_bytes >
6148 		     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
6149 			mlx5_ib_dbg(dev, "Invalid log stride size (%u). Range is %u - %u\n",
6150 				    ucmd.single_stride_log_num_of_bytes,
6151 				    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
6152 				    MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
6153 			return -EINVAL;
6154 		}
6155 		if (!log_of_strides_valid(dev,
6156 					  ucmd.single_wqe_log_num_of_strides)) {
6157 			mlx5_ib_dbg(
6158 				dev,
6159 				"Invalid log num strides (%u). Range is %u - %u\n",
6160 				ucmd.single_wqe_log_num_of_strides,
6161 				MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
6162 					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
6163 					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
6164 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
6165 			return -EINVAL;
6166 		}
6167 		rwq->single_stride_log_num_of_bytes =
6168 			ucmd.single_stride_log_num_of_bytes;
6169 		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
6170 		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
6171 		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
6172 	}
6173 
6174 	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
6175 	if (err) {
6176 		mlx5_ib_dbg(dev, "err %d\n", err);
6177 		return err;
6178 	}
6179 
6180 	err = create_user_rq(dev, pd, udata, rwq, &ucmd);
6181 	if (err) {
6182 		mlx5_ib_dbg(dev, "err %d\n", err);
6183 		return err;
6184 	}
6185 
6186 	rwq->user_index = ucmd.user_index;
6187 	return 0;
6188 }
6189 
6190 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
6191 				struct ib_wq_init_attr *init_attr,
6192 				struct ib_udata *udata)
6193 {
6194 	struct mlx5_ib_dev *dev;
6195 	struct mlx5_ib_rwq *rwq;
6196 	struct mlx5_ib_create_wq_resp resp = {};
6197 	size_t min_resp_len;
6198 	int err;
6199 
6200 	if (!udata)
6201 		return ERR_PTR(-ENOSYS);
6202 
6203 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
6204 	if (udata->outlen && udata->outlen < min_resp_len)
6205 		return ERR_PTR(-EINVAL);
6206 
6207 	dev = to_mdev(pd->device);
6208 	switch (init_attr->wq_type) {
6209 	case IB_WQT_RQ:
6210 		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
6211 		if (!rwq)
6212 			return ERR_PTR(-ENOMEM);
6213 		err = prepare_user_rq(pd, init_attr, udata, rwq);
6214 		if (err)
6215 			goto err;
6216 		err = create_rq(rwq, pd, init_attr);
6217 		if (err)
6218 			goto err_user_rq;
6219 		break;
6220 	default:
6221 		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
6222 			    init_attr->wq_type);
6223 		return ERR_PTR(-EINVAL);
6224 	}
6225 
6226 	rwq->ibwq.wq_num = rwq->core_qp.qpn;
6227 	rwq->ibwq.state = IB_WQS_RESET;
6228 	if (udata->outlen) {
6229 		resp.response_length = offsetof(typeof(resp), response_length) +
6230 				sizeof(resp.response_length);
6231 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
6232 		if (err)
6233 			goto err_copy;
6234 	}
6235 
6236 	rwq->core_qp.event = mlx5_ib_wq_event;
6237 	rwq->ibwq.event_handler = init_attr->event_handler;
6238 	return &rwq->ibwq;
6239 
6240 err_copy:
6241 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
6242 err_user_rq:
6243 	destroy_user_rq(dev, pd, rwq, udata);
6244 err:
6245 	kfree(rwq);
6246 	return ERR_PTR(err);
6247 }
6248 
6249 void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
6250 {
6251 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
6252 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
6253 
6254 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
6255 	destroy_user_rq(dev, wq->pd, rwq, udata);
6256 	kfree(rwq);
6257 }
6258 
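/*
 * Create a firmware RQ table (RQT) from the user-provided indirection
 * table of WQs.
 */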
6259 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
6260 						      struct ib_rwq_ind_table_init_attr *init_attr,
6261 						      struct ib_udata *udata)
6262 {
6263 	struct mlx5_ib_dev *dev = to_mdev(device);
6264 	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
6265 	int sz = 1 << init_attr->log_ind_tbl_size;
6266 	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
6267 	size_t min_resp_len;
6268 	int inlen;
6269 	int err;
6270 	int i;
6271 	u32 *in;
6272 	void *rqtc;
6273 
6274 	if (udata->inlen > 0 &&
6275 	    !ib_is_udata_cleared(udata, 0,
6276 				 udata->inlen))
6277 		return ERR_PTR(-EOPNOTSUPP);
6278 
6279 	if (init_attr->log_ind_tbl_size >
6280 	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
6281 		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
6282 			    init_attr->log_ind_tbl_size,
6283 			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
6284 		return ERR_PTR(-EINVAL);
6285 	}
6286 
6287 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
6288 	if (udata->outlen && udata->outlen < min_resp_len)
6289 		return ERR_PTR(-EINVAL);
6290 
6291 	rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
6292 	if (!rwq_ind_tbl)
6293 		return ERR_PTR(-ENOMEM);
6294 
6295 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
6296 	in = kvzalloc(inlen, GFP_KERNEL);
6297 	if (!in) {
6298 		err = -ENOMEM;
6299 		goto err;
6300 	}
6301 
6302 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
6303 
6304 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
6305 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
6306 
6307 	for (i = 0; i < sz; i++)
6308 		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
6309 
6310 	rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
6311 	MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
6312 
6313 	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
6314 	kvfree(in);
6315 
6316 	if (err)
6317 		goto err;
6318 
6319 	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
6320 	if (udata->outlen) {
6321 		resp.response_length = offsetof(typeof(resp), response_length) +
6322 					sizeof(resp.response_length);
6323 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
6324 		if (err)
6325 			goto err_copy;
6326 	}
6327 
6328 	return &rwq_ind_tbl->ib_rwq_ind_tbl;
6329 
6330 err_copy:
6331 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
6332 err:
6333 	kfree(rwq_ind_tbl);
6334 	return ERR_PTR(err);
6335 }
6336 
6337 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
6338 {
6339 	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
6340 	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
6341 
6342 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
6343 
6344 	kfree(rwq_ind_tbl);
6345 	return 0;
6346 }
6347 
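/*
 * Modify a user WQ: translate the IB WQ states to RQ context states and
 * issue MODIFY_RQ, optionally toggling CVLAN stripping and attaching the
 * RQ counter set on the RESET->RDY transition when supported.
 */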
6348 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
6349 		      u32 wq_attr_mask, struct ib_udata *udata)
6350 {
6351 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
6352 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
6353 	struct mlx5_ib_modify_wq ucmd = {};
6354 	size_t required_cmd_sz;
6355 	int curr_wq_state;
6356 	int wq_state;
6357 	int inlen;
6358 	int err;
6359 	void *rqc;
6360 	void *in;
6361 
6362 	required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
6363 	if (udata->inlen < required_cmd_sz)
6364 		return -EINVAL;
6365 
6366 	if (udata->inlen > sizeof(ucmd) &&
6367 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
6368 				 udata->inlen - sizeof(ucmd)))
6369 		return -EOPNOTSUPP;
6370 
6371 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
6372 		return -EFAULT;
6373 
6374 	if (ucmd.comp_mask || ucmd.reserved)
6375 		return -EOPNOTSUPP;
6376 
6377 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
6378 	in = kvzalloc(inlen, GFP_KERNEL);
6379 	if (!in)
6380 		return -ENOMEM;
6381 
6382 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
6383 
6384 	curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
6385 		wq_attr->curr_wq_state : wq->state;
6386 	wq_state = (wq_attr_mask & IB_WQ_STATE) ?
6387 		wq_attr->wq_state : curr_wq_state;
6388 	if (curr_wq_state == IB_WQS_ERR)
6389 		curr_wq_state = MLX5_RQC_STATE_ERR;
6390 	if (wq_state == IB_WQS_ERR)
6391 		wq_state = MLX5_RQC_STATE_ERR;
6392 	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
6393 	MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
6394 	MLX5_SET(rqc, rqc, state, wq_state);
6395 
6396 	if (wq_attr_mask & IB_WQ_FLAGS) {
6397 		if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
6398 			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
6399 			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
6400 				mlx5_ib_dbg(dev,
6401 					    "VLAN offloads are not supported\n");
6402 				err = -EOPNOTSUPP;
6403 				goto out;
6404 			}
6405 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
6406 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
6407 			MLX5_SET(rqc, rqc, vsd,
6408 				 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
6409 		}
6410 
6411 		if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
6412 			mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
6413 			err = -EOPNOTSUPP;
6414 			goto out;
6415 		}
6416 	}
6417 
6418 	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
6419 		u16 set_id;
6420 
6421 		set_id = mlx5_ib_get_counters_id(dev, 0);
6422 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
6423 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
6424 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
6425 			MLX5_SET(rqc, rqc, counter_set_id, set_id);
6426 		} else
6427 			dev_info_once(
6428 				&dev->ib_dev.dev,
6429 				"Receive WQ counters are not supported on current FW\n");
6430 	}
6431 
6432 	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
6433 	if (!err)
6434 		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
6435 
6436 out:
6437 	kvfree(in);
6438 	return err;
6439 }
6440 
6441 struct mlx5_ib_drain_cqe {
6442 	struct ib_cqe cqe;
6443 	struct completion done;
6444 };
6445 
6446 static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
6447 {
6448 	struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
6449 						     struct mlx5_ib_drain_cqe,
6450 						     cqe);
6451 
6452 	complete(&cqe->done);
6453 }
6454 
6455 /* This function returns only after the drain WR has completed */
6456 static void handle_drain_completion(struct ib_cq *cq,
6457 				    struct mlx5_ib_drain_cqe *sdrain,
6458 				    struct mlx5_ib_dev *dev)
6459 {
6460 	struct mlx5_core_dev *mdev = dev->mdev;
6461 
6462 	if (cq->poll_ctx == IB_POLL_DIRECT) {
6463 		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
6464 			ib_process_cq_direct(cq, -1);
6465 		return;
6466 	}
6467 
6468 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6469 		struct mlx5_ib_cq *mcq = to_mcq(cq);
6470 		bool triggered = false;
6471 		unsigned long flags;
6472 
6473 		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
6474 		/* Make sure that the CQ handler won't run if wasn't run yet */
6475 		/* Make sure that the CQ handler won't run if it hasn't run yet */
6476 			mcq->mcq.reset_notify_added = 1;
6477 		else
6478 			triggered = true;
6479 		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
6480 
6481 		if (triggered) {
6482 			/* Wait for any scheduled/running task to end */
6483 			switch (cq->poll_ctx) {
6484 			case IB_POLL_SOFTIRQ:
6485 				irq_poll_disable(&cq->iop);
6486 				irq_poll_enable(&cq->iop);
6487 				break;
6488 			case IB_POLL_WORKQUEUE:
6489 				cancel_work_sync(&cq->work);
6490 				break;
6491 			default:
6492 				WARN_ON_ONCE(1);
6493 			}
6494 		}
6495 
6496 		/* Run the CQ handler - this makes sure that the drain WR will
6497 		 * be processed if wasn't processed yet.
6498 		 * be processed if it wasn't processed yet.
6499 		mcq->mcq.comp(&mcq->mcq, NULL);
6500 	}
6501 
6502 	wait_for_completion(&sdrain->done);
6503 }
6504 
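/*
 * Drain the send queue: move the QP to the error state, post a marker
 * RDMA_WRITE WR and wait for its completion, which guarantees that all
 * previously posted send WRs have been flushed.
 */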
6505 void mlx5_ib_drain_sq(struct ib_qp *qp)
6506 {
6507 	struct ib_cq *cq = qp->send_cq;
6508 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
6509 	struct mlx5_ib_drain_cqe sdrain;
6510 	const struct ib_send_wr *bad_swr;
6511 	struct ib_rdma_wr swr = {
6512 		.wr = {
6513 			.next = NULL,
6514 			{ .wr_cqe	= &sdrain.cqe, },
6515 			.opcode	= IB_WR_RDMA_WRITE,
6516 		},
6517 	};
6518 	int ret;
6519 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
6520 	struct mlx5_core_dev *mdev = dev->mdev;
6521 
6522 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
6523 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6524 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
6525 		return;
6526 	}
6527 
6528 	sdrain.cqe.done = mlx5_ib_drain_qp_done;
6529 	init_completion(&sdrain.done);
6530 
6531 	ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
6532 	if (ret) {
6533 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
6534 		return;
6535 	}
6536 
6537 	handle_drain_completion(cq, &sdrain, dev);
6538 }
6539 
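/* Drain the receive queue the same way, using a marker receive WR. */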
6540 void mlx5_ib_drain_rq(struct ib_qp *qp)
6541 {
6542 	struct ib_cq *cq = qp->recv_cq;
6543 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
6544 	struct mlx5_ib_drain_cqe rdrain;
6545 	struct ib_recv_wr rwr = {};
6546 	const struct ib_recv_wr *bad_rwr;
6547 	int ret;
6548 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
6549 	struct mlx5_core_dev *mdev = dev->mdev;
6550 
6551 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
6552 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6553 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
6554 		return;
6555 	}
6556 
6557 	rwr.wr_cqe = &rdrain.cqe;
6558 	rdrain.cqe.done = mlx5_ib_drain_qp_done;
6559 	init_completion(&rdrain.done);
6560 
6561 	ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
6562 	if (ret) {
6563 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
6564 		return;
6565 	}
6566 
6567 	handle_drain_completion(cq, &rdrain, dev);
6568 }
6569 
6570 /**
6571  * mlx5_ib_qp_set_counter - Bind a QP to a counter. If @counter is NULL,
6572  * bind the QP to the default counter.
6573  */
6574 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
6575 {
6576 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
6577 	struct mlx5_ib_qp *mqp = to_mqp(qp);
6578 	int err = 0;
6579 
6580 	mutex_lock(&mqp->mutex);
6581 	if (mqp->state == IB_QPS_RESET) {
6582 		qp->counter = counter;
6583 		goto out;
6584 	}
6585 
6586 	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
6587 		err = -EOPNOTSUPP;
6588 		goto out;
6589 	}
6590 
6591 	if (mqp->state == IB_QPS_RTS) {
6592 		err = __mlx5_ib_qp_set_counter(qp, counter);
6593 		if (!err)
6594 			qp->counter = counter;
6595 
6596 		goto out;
6597 	}
6598 
6599 	mqp->counter_pending = 1;
6600 	qp->counter = counter;
6601 
6602 out:
6603 	mutex_unlock(&mqp->mutex);
6604 	return err;
6605 }
6606