xref: /openbmc/linux/drivers/infiniband/hw/mlx5/cq.c (revision 8dda2eac)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/kref.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_user_verbs.h>
36 #include <rdma/ib_cache.h>
37 #include "mlx5_ib.h"
38 #include "srq.h"
39 #include "qp.h"
40 
41 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
42 {
43 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
44 
45 	ibcq->comp_handler(ibcq, ibcq->cq_context);
46 }
47 
48 static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
49 {
50 	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
51 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
52 	struct ib_cq *ibcq = &cq->ibcq;
53 	struct ib_event event;
54 
55 	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
56 		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
57 			     type, mcq->cqn);
58 		return;
59 	}
60 
61 	if (ibcq->event_handler) {
62 		event.device     = &dev->ib_dev;
63 		event.event      = IB_EVENT_CQ_ERR;
64 		event.element.cq = ibcq;
65 		ibcq->event_handler(&event, ibcq->cq_context);
66 	}
67 }
68 
69 static void *get_cqe(struct mlx5_ib_cq *cq, int n)
70 {
71 	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
72 }
73 
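/*
 * Software ownership bit for absolute index 'n' in a ring of 'nent'
 * (power of two) entries: the bit toggles on every complete pass through
 * the ring, and is used to re-stamp CQEs copied into a resized buffer.
 */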
74 static u8 sw_ownership_bit(int n, int nent)
75 {
76 	return (n & nent) ? 1 : 0;
77 }
78 
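/*
 * Return the CQE at index 'n' if it is owned by software: the opcode must
 * be valid and the ownership bit must match the expected value for the
 * current pass through the ring.  With 128-byte CQEs, the 64-byte
 * reporting section sits in the second half of the entry.
 */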
79 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
80 {
81 	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
82 	struct mlx5_cqe64 *cqe64;
83 
84 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
85 
86 	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
87 	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
88 		return cqe;
89 	} else {
90 		return NULL;
91 	}
92 }
93 
94 static void *next_cqe_sw(struct mlx5_ib_cq *cq)
95 {
96 	return get_sw_cqe(cq, cq->mcq.cons_index);
97 }
98 
99 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
100 {
101 	switch (wq->wr_data[idx]) {
102 	case MLX5_IB_WR_UMR:
103 		return 0;
104 
105 	case IB_WR_LOCAL_INV:
106 		return IB_WC_LOCAL_INV;
107 
108 	case IB_WR_REG_MR:
109 		return IB_WC_REG_MR;
110 
111 	default:
112 		pr_warn("unknown UMR completion opcode\n");
113 		return 0;
114 	}
115 }
116 
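/*
 * Fill in the work completion for a successful requester (send-side) CQE.
 * The WQE opcode is taken from the top byte of sop_drop_qpn; UMR
 * completions are mapped back to the verbs opcode saved in wq->wr_data.
 */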
117 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
118 			    struct mlx5_ib_wq *wq, int idx)
119 {
120 	wc->wc_flags = 0;
121 	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
122 	case MLX5_OPCODE_RDMA_WRITE_IMM:
123 		wc->wc_flags |= IB_WC_WITH_IMM;
124 		fallthrough;
125 	case MLX5_OPCODE_RDMA_WRITE:
126 		wc->opcode    = IB_WC_RDMA_WRITE;
127 		break;
128 	case MLX5_OPCODE_SEND_IMM:
129 		wc->wc_flags |= IB_WC_WITH_IMM;
130 		fallthrough;
131 	case MLX5_OPCODE_SEND:
132 	case MLX5_OPCODE_SEND_INVAL:
133 		wc->opcode    = IB_WC_SEND;
134 		break;
135 	case MLX5_OPCODE_RDMA_READ:
136 		wc->opcode    = IB_WC_RDMA_READ;
137 		wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
138 		break;
139 	case MLX5_OPCODE_ATOMIC_CS:
140 		wc->opcode    = IB_WC_COMP_SWAP;
141 		wc->byte_len  = 8;
142 		break;
143 	case MLX5_OPCODE_ATOMIC_FA:
144 		wc->opcode    = IB_WC_FETCH_ADD;
145 		wc->byte_len  = 8;
146 		break;
147 	case MLX5_OPCODE_ATOMIC_MASKED_CS:
148 		wc->opcode    = IB_WC_MASKED_COMP_SWAP;
149 		wc->byte_len  = 8;
150 		break;
151 	case MLX5_OPCODE_ATOMIC_MASKED_FA:
152 		wc->opcode    = IB_WC_MASKED_FETCH_ADD;
153 		wc->byte_len  = 8;
154 		break;
155 	case MLX5_OPCODE_UMR:
156 		wc->opcode = get_umr_comp(wq, idx);
157 		break;
158 	}
159 }
160 
161 enum {
162 	MLX5_GRH_IN_BUFFER = 1,
163 	MLX5_GRH_IN_CQE	   = 2,
164 };
165 
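/*
 * Fill in the work completion for a responder (receive-side) CQE: consume
 * the receive WQE from the SRQ/XRC SRQ or the QP receive queue, decode
 * opcode, immediate data and flags, and for Ethernet (RoCE) ports also
 * recover the VLAN, SL and network header type from the CQE.
 */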
166 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
167 			     struct mlx5_ib_qp *qp)
168 {
169 	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
170 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
171 	struct mlx5_ib_srq *srq = NULL;
172 	struct mlx5_ib_wq *wq;
173 	u16 wqe_ctr;
174 	u8  roce_packet_type;
175 	bool vlan_present;
176 	u8 g;
177 
178 	if (qp->ibqp.srq || qp->ibqp.xrcd) {
179 		struct mlx5_core_srq *msrq = NULL;
180 
181 		if (qp->ibqp.xrcd) {
182 			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
183 			if (msrq)
184 				srq = to_mibsrq(msrq);
185 		} else {
186 			srq = to_msrq(qp->ibqp.srq);
187 		}
188 		if (srq) {
189 			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
190 			wc->wr_id = srq->wrid[wqe_ctr];
191 			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
192 			if (msrq)
193 				mlx5_core_res_put(&msrq->common);
194 		}
195 	} else {
196 		wq	  = &qp->rq;
197 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
198 		++wq->tail;
199 	}
200 	wc->byte_len = be32_to_cpu(cqe->byte_cnt);
201 
202 	switch (get_cqe_opcode(cqe)) {
203 	case MLX5_CQE_RESP_WR_IMM:
204 		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
205 		wc->wc_flags	= IB_WC_WITH_IMM;
206 		wc->ex.imm_data = cqe->immediate;
207 		break;
208 	case MLX5_CQE_RESP_SEND:
209 		wc->opcode   = IB_WC_RECV;
210 		wc->wc_flags = IB_WC_IP_CSUM_OK;
211 		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
212 			       (cqe->hds_ip_ext & CQE_L4_OK))))
213 			wc->wc_flags = 0;
214 		break;
215 	case MLX5_CQE_RESP_SEND_IMM:
216 		wc->opcode	= IB_WC_RECV;
217 		wc->wc_flags	= IB_WC_WITH_IMM;
218 		wc->ex.imm_data = cqe->immediate;
219 		break;
220 	case MLX5_CQE_RESP_SEND_INV:
221 		wc->opcode	= IB_WC_RECV;
222 		wc->wc_flags	= IB_WC_WITH_INVALIDATE;
223 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
224 		break;
225 	}
226 	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
227 	wc->dlid_path_bits = cqe->ml_path;
228 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
229 	wc->wc_flags |= g ? IB_WC_GRH : 0;
230 	if (is_qp1(qp->type)) {
231 		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
232 
233 		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
234 				    &wc->pkey_index);
235 	} else {
236 		wc->pkey_index = 0;
237 	}
238 
239 	if (ll != IB_LINK_LAYER_ETHERNET) {
240 		wc->slid = be16_to_cpu(cqe->slid);
241 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
242 		return;
243 	}
244 
245 	wc->slid = 0;
246 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
247 	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
248 	if (vlan_present) {
249 		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
250 		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
251 		wc->wc_flags |= IB_WC_WITH_VLAN;
252 	} else {
253 		wc->sl = 0;
254 	}
255 
256 	switch (roce_packet_type) {
257 	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
258 		wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
259 		break;
260 	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
261 		wc->network_hdr_type = RDMA_NETWORK_IPV6;
262 		break;
263 	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
264 		wc->network_hdr_type = RDMA_NETWORK_IPV4;
265 		break;
266 	}
267 	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
268 }
269 
270 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
271 {
272 	mlx5_ib_warn(dev, "dump error cqe\n");
273 	mlx5_dump_err_cqe(dev->mdev, cqe);
274 }
275 
276 static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
277 				  struct mlx5_err_cqe *cqe,
278 				  struct ib_wc *wc)
279 {
280 	int dump = 1;
281 
282 	switch (cqe->syndrome) {
283 	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
284 		wc->status = IB_WC_LOC_LEN_ERR;
285 		break;
286 	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
287 		wc->status = IB_WC_LOC_QP_OP_ERR;
288 		break;
289 	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
290 		wc->status = IB_WC_LOC_PROT_ERR;
291 		break;
292 	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
293 		dump = 0;
294 		wc->status = IB_WC_WR_FLUSH_ERR;
295 		break;
296 	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
297 		wc->status = IB_WC_MW_BIND_ERR;
298 		break;
299 	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
300 		wc->status = IB_WC_BAD_RESP_ERR;
301 		break;
302 	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
303 		wc->status = IB_WC_LOC_ACCESS_ERR;
304 		break;
305 	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
306 		wc->status = IB_WC_REM_INV_REQ_ERR;
307 		break;
308 	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
309 		wc->status = IB_WC_REM_ACCESS_ERR;
310 		break;
311 	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
312 		wc->status = IB_WC_REM_OP_ERR;
313 		break;
314 	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
315 		wc->status = IB_WC_RETRY_EXC_ERR;
316 		dump = 0;
317 		break;
318 	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
319 		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
320 		dump = 0;
321 		break;
322 	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
323 		wc->status = IB_WC_REM_ABORT_ERR;
324 		break;
325 	default:
326 		wc->status = IB_WC_GENERAL_ERR;
327 		break;
328 	}
329 
330 	wc->vendor_err = cqe->vendor_err_synd;
331 	if (dump)
332 		dump_cqe(dev, cqe);
333 }
334 
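/*
 * Advance sq.last_poll past the WQE reported at index 'head': walk the
 * work-request list from 'tail' and leave last_poll pointing at the entry
 * that follows the completed one.
 */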
335 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
336 			   u16 tail, u16 head)
337 {
338 	u16 idx;
339 
340 	do {
341 		idx = tail & (qp->sq.wqe_cnt - 1);
342 		if (idx == head)
343 			break;
344 
345 		tail = qp->sq.w_list[idx].next;
346 	} while (1);
347 	tail = qp->sq.w_list[idx].next;
348 	qp->sq.last_poll = tail;
349 }
350 
351 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
352 {
353 	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
354 }
355 
356 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
357 			     struct ib_sig_err *item)
358 {
359 	u16 syndrome = be16_to_cpu(cqe->syndrome);
360 
361 #define GUARD_ERR   (1 << 13)
362 #define APPTAG_ERR  (1 << 12)
363 #define REFTAG_ERR  (1 << 11)
364 
365 	if (syndrome & GUARD_ERR) {
366 		item->err_type = IB_SIG_BAD_GUARD;
367 		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
368 		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
369 	} else
370 	if (syndrome & REFTAG_ERR) {
371 		item->err_type = IB_SIG_BAD_REFTAG;
372 		item->expected = be32_to_cpu(cqe->expected_reftag);
373 		item->actual = be32_to_cpu(cqe->actual_reftag);
374 	} else
375 	if (syndrome & APPTAG_ERR) {
376 		item->err_type = IB_SIG_BAD_APPTAG;
377 		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
378 		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
379 	} else {
380 		pr_err("Got signature completion error with bad syndrome %04x\n",
381 		       syndrome);
382 	}
383 
384 	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
385 	item->key = be32_to_cpu(cqe->mkey);
386 }
387 
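/*
 * Generate flush-error completions in software for WQEs still outstanding
 * on a send or receive queue.  Used when the device is in internal error
 * state and hardware will not produce any further CQEs.
 */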
388 static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
389 		    int *npolled, bool is_send)
390 {
391 	struct mlx5_ib_wq *wq;
392 	unsigned int cur;
393 	int np;
394 	int i;
395 
396 	wq = (is_send) ? &qp->sq : &qp->rq;
397 	cur = wq->head - wq->tail;
398 	np = *npolled;
399 
400 	if (cur == 0)
401 		return;
402 
403 	for (i = 0;  i < cur && np < num_entries; i++) {
404 		unsigned int idx;
405 
406 		idx = (is_send) ? wq->last_poll : wq->tail;
407 		idx &= (wq->wqe_cnt - 1);
408 		wc->wr_id = wq->wrid[idx];
409 		wc->status = IB_WC_WR_FLUSH_ERR;
410 		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
411 		wq->tail++;
412 		if (is_send)
413 			wq->last_poll = wq->w_list[idx].next;
414 		np++;
415 		wc->qp = &qp->ibqp;
416 		wc++;
417 	}
418 	*npolled = np;
419 }
420 
421 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
422 				 struct ib_wc *wc, int *npolled)
423 {
424 	struct mlx5_ib_qp *qp;
425 
426 	*npolled = 0;
427 	/* Find uncompleted WQEs belonging to that CQ and return mimicked flush-error completions for them */
428 	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
429 		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
430 		if (*npolled >= num_entries)
431 			return;
432 	}
433 
434 	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
435 		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
436 		if (*npolled >= num_entries)
437 			return;
438 	}
439 }
440 
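/*
 * Poll one CQE from the CQ.  Returns 0 when a completion has been written
 * to 'wc' and -EAGAIN when no software-owned CQE is available.  Resize
 * CQEs switch the CQ to its new buffer and signature-error CQEs are
 * recorded on the signature MR; both cause the next entry to be polled.
 */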
441 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
442 			 struct mlx5_ib_qp **cur_qp,
443 			 struct ib_wc *wc)
444 {
445 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
446 	struct mlx5_err_cqe *err_cqe;
447 	struct mlx5_cqe64 *cqe64;
448 	struct mlx5_core_qp *mqp;
449 	struct mlx5_ib_wq *wq;
450 	uint8_t opcode;
451 	uint32_t qpn;
452 	u16 wqe_ctr;
453 	void *cqe;
454 	int idx;
455 
456 repoll:
457 	cqe = next_cqe_sw(cq);
458 	if (!cqe)
459 		return -EAGAIN;
460 
461 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
462 
463 	++cq->mcq.cons_index;
464 
465 	/* Make sure we read CQ entry contents after we've checked the
466 	 * ownership bit.
467 	 */
468 	rmb();
469 
470 	opcode = get_cqe_opcode(cqe64);
471 	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
472 		if (likely(cq->resize_buf)) {
473 			free_cq_buf(dev, &cq->buf);
474 			cq->buf = *cq->resize_buf;
475 			kfree(cq->resize_buf);
476 			cq->resize_buf = NULL;
477 			goto repoll;
478 		} else {
479 			mlx5_ib_warn(dev, "unexpected resize cqe\n");
480 		}
481 	}
482 
483 	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
484 	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
485 		/* We do not have to take the QP table lock here,
486 		 * because CQs will be locked while QPs are removed
487 		 * from the table.
488 		 */
489 		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
490 		*cur_qp = to_mibqp(mqp);
491 	}
492 
493 	wc->qp  = &(*cur_qp)->ibqp;
494 	switch (opcode) {
495 	case MLX5_CQE_REQ:
496 		wq = &(*cur_qp)->sq;
497 		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
498 		idx = wqe_ctr & (wq->wqe_cnt - 1);
499 		handle_good_req(wc, cqe64, wq, idx);
500 		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
501 		wc->wr_id = wq->wrid[idx];
502 		wq->tail = wq->wqe_head[idx] + 1;
503 		wc->status = IB_WC_SUCCESS;
504 		break;
505 	case MLX5_CQE_RESP_WR_IMM:
506 	case MLX5_CQE_RESP_SEND:
507 	case MLX5_CQE_RESP_SEND_IMM:
508 	case MLX5_CQE_RESP_SEND_INV:
509 		handle_responder(wc, cqe64, *cur_qp);
510 		wc->status = IB_WC_SUCCESS;
511 		break;
512 	case MLX5_CQE_RESIZE_CQ:
513 		break;
514 	case MLX5_CQE_REQ_ERR:
515 	case MLX5_CQE_RESP_ERR:
516 		err_cqe = (struct mlx5_err_cqe *)cqe64;
517 		mlx5_handle_error_cqe(dev, err_cqe, wc);
518 		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
519 			    opcode == MLX5_CQE_REQ_ERR ?
520 			    "Requestor" : "Responder", cq->mcq.cqn);
521 		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
522 			    err_cqe->syndrome, err_cqe->vendor_err_synd);
523 		if (opcode == MLX5_CQE_REQ_ERR) {
524 			wq = &(*cur_qp)->sq;
525 			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
526 			idx = wqe_ctr & (wq->wqe_cnt - 1);
527 			wc->wr_id = wq->wrid[idx];
528 			wq->tail = wq->wqe_head[idx] + 1;
529 		} else {
530 			struct mlx5_ib_srq *srq;
531 
532 			if ((*cur_qp)->ibqp.srq) {
533 				srq = to_msrq((*cur_qp)->ibqp.srq);
534 				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
535 				wc->wr_id = srq->wrid[wqe_ctr];
536 				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
537 			} else {
538 				wq = &(*cur_qp)->rq;
539 				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
540 				++wq->tail;
541 			}
542 		}
543 		break;
544 	case MLX5_CQE_SIG_ERR: {
545 		struct mlx5_sig_err_cqe *sig_err_cqe =
546 			(struct mlx5_sig_err_cqe *)cqe64;
547 		struct mlx5_core_sig_ctx *sig;
548 
549 		xa_lock(&dev->sig_mrs);
550 		sig = xa_load(&dev->sig_mrs,
551 				mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
552 		get_sig_err_item(sig_err_cqe, &sig->err_item);
553 		sig->sig_err_exists = true;
554 		sig->sigerr_count++;
555 
556 		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
557 			     cq->mcq.cqn, sig->err_item.key,
558 			     sig->err_item.err_type,
559 			     sig->err_item.sig_err_offset,
560 			     sig->err_item.expected,
561 			     sig->err_item.actual);
562 
563 		xa_unlock(&dev->sig_mrs);
564 		goto repoll;
565 	}
566 	}
567 
568 	return 0;
569 }
570 
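/*
 * Drain completions that were generated in software (queued by
 * mlx5_ib_generate_wc) into the caller's WC array; on a fatal device
 * error they are reported as flush errors.
 */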
571 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
572 			struct ib_wc *wc, bool is_fatal_err)
573 {
574 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
575 	struct mlx5_ib_wc *soft_wc, *next;
576 	int npolled = 0;
577 
578 	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
579 		if (npolled >= num_entries)
580 			break;
581 
582 		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
583 			    cq->mcq.cqn);
584 
585 		if (unlikely(is_fatal_err)) {
586 			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
587 			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
588 		}
589 		wc[npolled++] = soft_wc->wc;
590 		list_del(&soft_wc->list);
591 		kfree(soft_wc);
592 	}
593 
594 	return npolled;
595 }
596 
597 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
598 {
599 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
600 	struct mlx5_ib_qp *cur_qp = NULL;
601 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
602 	struct mlx5_core_dev *mdev = dev->mdev;
603 	unsigned long flags;
604 	int soft_polled = 0;
605 	int npolled;
606 
607 	spin_lock_irqsave(&cq->lock, flags);
608 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
609 		/* make sure no soft WQEs are waiting */
610 		if (unlikely(!list_empty(&cq->wc_list)))
611 			soft_polled = poll_soft_wc(cq, num_entries, wc, true);
612 
613 		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
614 				     wc + soft_polled, &npolled);
615 		goto out;
616 	}
617 
618 	if (unlikely(!list_empty(&cq->wc_list)))
619 		soft_polled = poll_soft_wc(cq, num_entries, wc, false);
620 
621 	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
622 		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
623 			break;
624 	}
625 
626 	if (npolled)
627 		mlx5_cq_set_ci(&cq->mcq);
628 out:
629 	spin_unlock_irqrestore(&cq->lock, flags);
630 
631 	return soft_polled + npolled;
632 }
633 
634 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
635 {
636 	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
637 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
638 	void __iomem *uar_page = mdev->priv.uar->map;
639 	unsigned long irq_flags;
640 	int ret = 0;
641 
642 	spin_lock_irqsave(&cq->lock, irq_flags);
643 	if (cq->notify_flags != IB_CQ_NEXT_COMP)
644 		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
645 
646 	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
647 		ret = 1;
648 	spin_unlock_irqrestore(&cq->lock, irq_flags);
649 
650 	mlx5_cq_arm(&cq->mcq,
651 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
652 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
653 		    uar_page, to_mcq(ibcq)->mcq.cons_index);
654 
655 	return ret;
656 }
657 
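/*
 * Allocate the fragmented buffer backing a kernel CQ on the device's NUMA
 * node.  The stride is 64 bytes, or 128 bytes when 128-byte CQEs are used.
 */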
658 static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
659 			     struct mlx5_ib_cq_buf *buf,
660 			     int nent,
661 			     int cqe_size)
662 {
663 	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
664 	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
665 	u8 log_wq_sz     = ilog2(cqe_size);
666 	int err;
667 
668 	err = mlx5_frag_buf_alloc_node(dev->mdev,
669 				       nent * cqe_size,
670 				       frag_buf,
671 				       dev->mdev->priv.numa_node);
672 	if (err)
673 		return err;
674 
675 	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
676 
677 	buf->cqe_size = cqe_size;
678 	buf->nent = nent;
679 
680 	return 0;
681 }
682 
683 enum {
684 	MLX5_CQE_RES_FORMAT_HASH = 0,
685 	MLX5_CQE_RES_FORMAT_CSUM = 1,
686 	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
687 };
688 
689 static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
690 {
691 	switch (format) {
692 	case MLX5_IB_CQE_RES_FORMAT_HASH:
693 		return MLX5_CQE_RES_FORMAT_HASH;
694 	case MLX5_IB_CQE_RES_FORMAT_CSUM:
695 		return MLX5_CQE_RES_FORMAT_CSUM;
696 	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
697 		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
698 			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
699 		return -EOPNOTSUPP;
700 	default:
701 		return -EINVAL;
702 	}
703 }
704 
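/*
 * Build the CREATE_CQ command for a userspace CQ: validate the user
 * command, pin the CQ buffer and map the doorbell record, choose the best
 * DMA page size for the umem, and fill the PAS array and CQ context,
 * including optional CQE compression and 128-byte padding.
 */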
705 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
706 			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
707 			  int *cqe_size, int *index, int *inlen)
708 {
709 	struct mlx5_ib_create_cq ucmd = {};
710 	unsigned long page_size;
711 	unsigned int page_offset_quantized;
712 	size_t ucmdlen;
713 	__be64 *pas;
714 	int ncont;
715 	void *cqc;
716 	int err;
717 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
718 		udata, struct mlx5_ib_ucontext, ibucontext);
719 
720 	ucmdlen = min(udata->inlen, sizeof(ucmd));
721 	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
722 		return -EINVAL;
723 
724 	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
725 		return -EFAULT;
726 
727 	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
728 			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
729 			    MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
730 		return -EINVAL;
731 
732 	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
733 	    ucmd.reserved0 || ucmd.reserved1)
734 		return -EINVAL;
735 
736 	*cqe_size = ucmd.cqe_size;
737 
738 	cq->buf.umem =
739 		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
740 			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
741 	if (IS_ERR(cq->buf.umem)) {
742 		err = PTR_ERR(cq->buf.umem);
743 		return err;
744 	}
745 
746 	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
747 		cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
748 		page_offset, 64, &page_offset_quantized);
749 	if (!page_size) {
750 		err = -EINVAL;
751 		goto err_umem;
752 	}
753 
754 	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
755 	if (err)
756 		goto err_umem;
757 
758 	ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
759 	mlx5_ib_dbg(
760 		dev,
761 		"addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
762 		ucmd.buf_addr, entries * ucmd.cqe_size,
763 		ib_umem_num_pages(cq->buf.umem), page_size, ncont);
764 
765 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
766 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
767 	*cqb = kvzalloc(*inlen, GFP_KERNEL);
768 	if (!*cqb) {
769 		err = -ENOMEM;
770 		goto err_db;
771 	}
772 
773 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
774 	mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);
775 
776 	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
777 	MLX5_SET(cqc, cqc, log_page_size,
778 		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
779 	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
780 
781 	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
782 		*index = ucmd.uar_page_index;
783 	} else if (context->bfregi.lib_uar_dyn) {
784 		err = -EINVAL;
785 		goto err_cqb;
786 	} else {
787 		*index = context->bfregi.sys_pages[0];
788 	}
789 
790 	if (ucmd.cqe_comp_en == 1) {
791 		int mini_cqe_format;
792 
793 		if (!((*cqe_size == 128 &&
794 		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
795 		      (*cqe_size == 64  &&
796 		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
797 			err = -EOPNOTSUPP;
798 			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
799 				     *cqe_size);
800 			goto err_cqb;
801 		}
802 
803 		mini_cqe_format =
804 			mini_cqe_res_format_to_hw(dev,
805 						  ucmd.cqe_comp_res_format);
806 		if (mini_cqe_format < 0) {
807 			err = mini_cqe_format;
808 			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
809 				    ucmd.cqe_comp_res_format, err);
810 			goto err_cqb;
811 		}
812 
813 		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
814 		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
815 	}
816 
817 	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
818 		if (*cqe_size != 128 ||
819 		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
820 			err = -EOPNOTSUPP;
821 			mlx5_ib_warn(dev,
822 				     "CQE padding is not supported for CQE size of %dB!\n",
823 				     *cqe_size);
824 			goto err_cqb;
825 		}
826 
827 		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
828 	}
829 
830 	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
831 		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;
832 
833 	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
834 	return 0;
835 
836 err_cqb:
837 	kvfree(*cqb);
838 
839 err_db:
840 	mlx5_ib_db_unmap_user(context, &cq->db);
841 
842 err_umem:
843 	ib_umem_release(cq->buf.umem);
844 	return err;
845 }
846 
847 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
848 {
849 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
850 		udata, struct mlx5_ib_ucontext, ibucontext);
851 
852 	mlx5_ib_db_unmap_user(context, &cq->db);
853 	ib_umem_release(cq->buf.umem);
854 }
855 
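/*
 * Stamp every CQE in a newly allocated kernel CQ buffer as invalid so the
 * ownership check fails until hardware writes a real completion.
 */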
856 static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
857 {
858 	int i;
859 	void *cqe;
860 	struct mlx5_cqe64 *cqe64;
861 
862 	for (i = 0; i < buf->nent; i++) {
863 		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
864 		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
865 		cqe64->op_own = MLX5_CQE_INVALID << 4;
866 	}
867 }
868 
869 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
870 			    int entries, int cqe_size,
871 			    u32 **cqb, int *index, int *inlen)
872 {
873 	__be64 *pas;
874 	void *cqc;
875 	int err;
876 
877 	err = mlx5_db_alloc(dev->mdev, &cq->db);
878 	if (err)
879 		return err;
880 
881 	cq->mcq.set_ci_db  = cq->db.db;
882 	cq->mcq.arm_db     = cq->db.db + 1;
883 	cq->mcq.cqe_sz = cqe_size;
884 
885 	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
886 	if (err)
887 		goto err_db;
888 
889 	init_cq_frag_buf(&cq->buf);
890 
891 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
892 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
893 		 cq->buf.frag_buf.npages;
894 	*cqb = kvzalloc(*inlen, GFP_KERNEL);
895 	if (!*cqb) {
896 		err = -ENOMEM;
897 		goto err_buf;
898 	}
899 
900 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
901 	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
902 
903 	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
904 	MLX5_SET(cqc, cqc, log_page_size,
905 		 cq->buf.frag_buf.page_shift -
906 		 MLX5_ADAPTER_PAGE_SHIFT);
907 
908 	*index = dev->mdev->priv.uar->index;
909 
910 	return 0;
911 
912 err_buf:
913 	free_cq_buf(dev, &cq->buf);
914 
915 err_db:
916 	mlx5_db_free(dev->mdev, &cq->db);
917 	return err;
918 }
919 
920 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
921 {
922 	free_cq_buf(dev, &cq->buf);
923 	mlx5_db_free(dev->mdev, &cq->db);
924 }
925 
926 static void notify_soft_wc_handler(struct work_struct *work)
927 {
928 	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
929 					     notify_work);
930 
931 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
932 }
933 
934 int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
935 		      struct ib_udata *udata)
936 {
937 	struct ib_device *ibdev = ibcq->device;
938 	int entries = attr->cqe;
939 	int vector = attr->comp_vector;
940 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
941 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
942 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
943 	int index;
944 	int inlen;
945 	u32 *cqb = NULL;
946 	void *cqc;
947 	int cqe_size;
948 	unsigned int irqn;
949 	int eqn;
950 	int err;
951 
952 	if (entries < 0 ||
953 	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
954 		return -EINVAL;
955 
956 	if (check_cq_create_flags(attr->flags))
957 		return -EOPNOTSUPP;
958 
959 	entries = roundup_pow_of_two(entries + 1);
960 	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
961 		return -EINVAL;
962 
963 	cq->ibcq.cqe = entries - 1;
964 	mutex_init(&cq->resize_mutex);
965 	spin_lock_init(&cq->lock);
966 	cq->resize_buf = NULL;
967 	cq->resize_umem = NULL;
968 	cq->create_flags = attr->flags;
969 	INIT_LIST_HEAD(&cq->list_send_qp);
970 	INIT_LIST_HEAD(&cq->list_recv_qp);
971 
972 	if (udata) {
973 		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
974 				     &index, &inlen);
975 		if (err)
976 			return err;
977 	} else {
978 		cqe_size = cache_line_size() == 128 ? 128 : 64;
979 		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
980 				       &index, &inlen);
981 		if (err)
982 			return err;
983 
984 		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
985 	}
986 
987 	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
988 	if (err)
989 		goto err_cqb;
990 
991 	cq->cqe_size = cqe_size;
992 
993 	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
994 	MLX5_SET(cqc, cqc, cqe_sz,
995 		 cqe_sz_to_mlx_sz(cqe_size,
996 				  cq->private_flags &
997 				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
998 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
999 	MLX5_SET(cqc, cqc, uar_page, index);
1000 	MLX5_SET(cqc, cqc, c_eqn, eqn);
1001 	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
1002 	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
1003 		MLX5_SET(cqc, cqc, oi, 1);
1004 
1005 	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
1006 	if (err)
1007 		goto err_cqb;
1008 
1009 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
1010 	cq->mcq.irqn = irqn;
1011 	if (udata)
1012 		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
1013 	else
1014 		cq->mcq.comp  = mlx5_ib_cq_comp;
1015 	cq->mcq.event = mlx5_ib_cq_event;
1016 
1017 	INIT_LIST_HEAD(&cq->wc_list);
1018 
1019 	if (udata)
1020 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
1021 			err = -EFAULT;
1022 			goto err_cmd;
1023 		}
1024 
1025 
1026 	kvfree(cqb);
1027 	return 0;
1028 
1029 err_cmd:
1030 	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
1031 
1032 err_cqb:
1033 	kvfree(cqb);
1034 	if (udata)
1035 		destroy_cq_user(cq, udata);
1036 	else
1037 		destroy_cq_kernel(dev, cq);
1038 	return err;
1039 }
1040 
1041 int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
1042 {
1043 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
1044 	struct mlx5_ib_cq *mcq = to_mcq(cq);
1045 	int ret;
1046 
1047 	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
1048 	if (ret)
1049 		return ret;
1050 
1051 	if (udata)
1052 		destroy_cq_user(mcq, udata);
1053 	else
1054 		destroy_cq_kernel(dev, mcq);
1055 	return 0;
1056 }
1057 
1058 static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
1059 {
1060 	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
1061 }
1062 
1063 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
1064 {
1065 	struct mlx5_cqe64 *cqe64, *dest64;
1066 	void *cqe, *dest;
1067 	u32 prod_index;
1068 	int nfreed = 0;
1069 	u8 owner_bit;
1070 
1071 	if (!cq)
1072 		return;
1073 
1074 	/* First we need to find the current producer index, so we
1075 	 * know where to start cleaning from.  It doesn't matter if HW
1076 	 * adds new entries after this loop -- the QP we're worried
1077 	 * about is already in RESET, so the new entries won't come
1078 	 * from our QP and therefore don't need to be checked.
1079 	 */
1080 	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
1081 		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1082 			break;
1083 
1084 	/* Now sweep backwards through the CQ, removing CQ entries
1085 	 * that match our QP by copying older entries on top of them.
1086 	 */
1087 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
1088 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
1089 		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1090 		if (is_equal_rsn(cqe64, rsn)) {
1091 			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
1092 				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
1093 			++nfreed;
1094 		} else if (nfreed) {
1095 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
1096 			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
1097 			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
1098 			memcpy(dest, cqe, cq->mcq.cqe_sz);
1099 			dest64->op_own = owner_bit |
1100 				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
1101 		}
1102 	}
1103 
1104 	if (nfreed) {
1105 		cq->mcq.cons_index += nfreed;
1106 		/* Make sure update of buffer contents is done before
1107 		 * updating consumer index.
1108 		 */
1109 		wmb();
1110 		mlx5_cq_set_ci(&cq->mcq);
1111 	}
1112 }
1113 
1114 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
1115 {
1116 	if (!cq)
1117 		return;
1118 
1119 	spin_lock_irq(&cq->lock);
1120 	__mlx5_ib_cq_clean(cq, qpn, srq);
1121 	spin_unlock_irq(&cq->lock);
1122 }
1123 
1124 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1125 {
1126 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
1127 	struct mlx5_ib_cq *mcq = to_mcq(cq);
1128 	int err;
1129 
1130 	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
1131 		return -EOPNOTSUPP;
1132 
1133 	if (cq_period > MLX5_MAX_CQ_PERIOD)
1134 		return -EINVAL;
1135 
1136 	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
1137 					     cq_period, cq_count);
1138 	if (err)
1139 		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
1140 
1141 	return err;
1142 }
1143 
1144 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1145 		       int entries, struct ib_udata *udata,
1146 		       int *cqe_size)
1147 {
1148 	struct mlx5_ib_resize_cq ucmd;
1149 	struct ib_umem *umem;
1150 	int err;
1151 
1152 	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
1153 	if (err)
1154 		return err;
1155 
1156 	if (ucmd.reserved0 || ucmd.reserved1)
1157 		return -EINVAL;
1158 
1159 	/* check multiplication overflow */
1160 	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
1161 		return -EINVAL;
1162 
1163 	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
1164 			   (size_t)ucmd.cqe_size * entries,
1165 			   IB_ACCESS_LOCAL_WRITE);
1166 	if (IS_ERR(umem)) {
1167 		err = PTR_ERR(umem);
1168 		return err;
1169 	}
1170 
1171 	cq->resize_umem = umem;
1172 	*cqe_size = ucmd.cqe_size;
1173 
1174 	return 0;
1175 }
1176 
1177 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1178 			 int entries, int cqe_size)
1179 {
1180 	int err;
1181 
1182 	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
1183 	if (!cq->resize_buf)
1184 		return -ENOMEM;
1185 
1186 	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
1187 	if (err)
1188 		goto ex;
1189 
1190 	init_cq_frag_buf(cq->resize_buf);
1191 
1192 	return 0;
1193 
1194 ex:
1195 	kfree(cq->resize_buf);
1196 	return err;
1197 }
1198 
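/*
 * For a kernel CQ resize, copy every software-owned CQE from the old
 * buffer into the resize buffer, fixing the ownership bit of each copy,
 * until the RESIZE_CQ CQE generated by firmware is reached.
 */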
1199 static int copy_resize_cqes(struct mlx5_ib_cq *cq)
1200 {
1201 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
1202 	struct mlx5_cqe64 *scqe64;
1203 	struct mlx5_cqe64 *dcqe64;
1204 	void *start_cqe;
1205 	void *scqe;
1206 	void *dcqe;
1207 	int ssize;
1208 	int dsize;
1209 	int i;
1210 	u8 sw_own;
1211 
1212 	ssize = cq->buf.cqe_size;
1213 	dsize = cq->resize_buf->cqe_size;
1214 	if (ssize != dsize) {
1215 		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
1216 		return -EINVAL;
1217 	}
1218 
1219 	i = cq->mcq.cons_index;
1220 	scqe = get_sw_cqe(cq, i);
1221 	scqe64 = ssize == 64 ? scqe : scqe + 64;
1222 	start_cqe = scqe;
1223 	if (!scqe) {
1224 		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1225 		return -EINVAL;
1226 	}
1227 
1228 	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
1229 		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
1230 					     (i + 1) & cq->resize_buf->nent);
1231 		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
1232 		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
1233 		memcpy(dcqe, scqe, dsize);
1234 		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
1235 
1236 		++i;
1237 		scqe = get_sw_cqe(cq, i);
1238 		scqe64 = ssize == 64 ? scqe : scqe + 64;
1239 		if (!scqe) {
1240 			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1241 			return -EINVAL;
1242 		}
1243 
1244 		if (scqe == start_cqe) {
1245 			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
1246 				cq->mcq.cqn);
1247 			return -ENOMEM;
1248 		}
1249 	}
1250 	++cq->mcq.cons_index;
1251 	return 0;
1252 }
1253 
1254 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
1255 {
1256 	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
1257 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
1258 	void *cqc;
1259 	u32 *in;
1260 	int err;
1261 	int npas;
1262 	__be64 *pas;
1263 	unsigned int page_offset_quantized = 0;
1264 	unsigned int page_shift;
1265 	int inlen;
1266 	int cqe_size;
1267 	unsigned long flags;
1268 
1269 	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
1270 		pr_info("Firmware does not support resize CQ\n");
1271 		return -ENOSYS;
1272 	}
1273 
1274 	if (entries < 1 ||
1275 	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
1276 		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
1277 			     entries,
1278 			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
1279 		return -EINVAL;
1280 	}
1281 
1282 	entries = roundup_pow_of_two(entries + 1);
1283 	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
1284 		return -EINVAL;
1285 
1286 	if (entries == ibcq->cqe + 1)
1287 		return 0;
1288 
1289 	mutex_lock(&cq->resize_mutex);
1290 	if (udata) {
1291 		unsigned long page_size;
1292 
1293 		err = resize_user(dev, cq, entries, udata, &cqe_size);
1294 		if (err)
1295 			goto ex;
1296 
1297 		page_size = mlx5_umem_find_best_cq_quantized_pgoff(
1298 			cq->resize_umem, cqc, log_page_size,
1299 			MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
1300 			&page_offset_quantized);
1301 		if (!page_size) {
1302 			err = -EINVAL;
1303 			goto ex_resize;
1304 		}
1305 		npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
1306 		page_shift = order_base_2(page_size);
1307 	} else {
1308 		struct mlx5_frag_buf *frag_buf;
1309 
1310 		cqe_size = 64;
1311 		err = resize_kernel(dev, cq, entries, cqe_size);
1312 		if (err)
1313 			goto ex;
1314 		frag_buf = &cq->resize_buf->frag_buf;
1315 		npas = frag_buf->npages;
1316 		page_shift = frag_buf->page_shift;
1317 	}
1318 
1319 	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
1320 		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
1321 
1322 	in = kvzalloc(inlen, GFP_KERNEL);
1323 	if (!in) {
1324 		err = -ENOMEM;
1325 		goto ex_resize;
1326 	}
1327 
1328 	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
1329 	if (udata)
1330 		mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
1331 				     0);
1332 	else
1333 		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
1334 
1335 	MLX5_SET(modify_cq_in, in,
1336 		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
1337 		 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
1338 		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
1339 		 MLX5_MODIFY_CQ_MASK_PG_SIZE);
1340 
1341 	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
1342 
1343 	MLX5_SET(cqc, cqc, log_page_size,
1344 		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1345 	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
1346 	MLX5_SET(cqc, cqc, cqe_sz,
1347 		 cqe_sz_to_mlx_sz(cqe_size,
1348 				  cq->private_flags &
1349 				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
1350 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
1351 
1352 	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
1353 	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
1354 
1355 	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
1356 	if (err)
1357 		goto ex_alloc;
1358 
1359 	if (udata) {
1360 		cq->ibcq.cqe = entries - 1;
1361 		ib_umem_release(cq->buf.umem);
1362 		cq->buf.umem = cq->resize_umem;
1363 		cq->resize_umem = NULL;
1364 	} else {
1365 		struct mlx5_ib_cq_buf tbuf;
1366 		int resized = 0;
1367 
1368 		spin_lock_irqsave(&cq->lock, flags);
1369 		if (cq->resize_buf) {
1370 			err = copy_resize_cqes(cq);
1371 			if (!err) {
1372 				tbuf = cq->buf;
1373 				cq->buf = *cq->resize_buf;
1374 				kfree(cq->resize_buf);
1375 				cq->resize_buf = NULL;
1376 				resized = 1;
1377 			}
1378 		}
1379 		cq->ibcq.cqe = entries - 1;
1380 		spin_unlock_irqrestore(&cq->lock, flags);
1381 		if (resized)
1382 			free_cq_buf(dev, &tbuf);
1383 	}
1384 	mutex_unlock(&cq->resize_mutex);
1385 
1386 	kvfree(in);
1387 	return 0;
1388 
1389 ex_alloc:
1390 	kvfree(in);
1391 
1392 ex_resize:
1393 	ib_umem_release(cq->resize_umem);
1394 	if (!udata) {
1395 		free_cq_buf(dev, cq->resize_buf);
1396 		cq->resize_buf = NULL;
1397 	}
1398 ex:
1399 	mutex_unlock(&cq->resize_mutex);
1400 	return err;
1401 }
1402 
1403 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
1404 {
1405 	struct mlx5_ib_cq *cq;
1406 
1407 	if (!ibcq)
1408 		return 128;
1409 
1410 	cq = to_mcq(ibcq);
1411 	return cq->cqe_size;
1412 }
1413 
1414 /* Called from atomic context */
1415 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
1416 {
1417 	struct mlx5_ib_wc *soft_wc;
1418 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
1419 	unsigned long flags;
1420 
1421 	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
1422 	if (!soft_wc)
1423 		return -ENOMEM;
1424 
1425 	soft_wc->wc = *wc;
1426 	spin_lock_irqsave(&cq->lock, flags);
1427 	list_add_tail(&soft_wc->list, &cq->wc_list);
1428 	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
1429 	    wc->status != IB_WC_SUCCESS) {
1430 		cq->notify_flags = 0;
1431 		schedule_work(&cq->notify_work);
1432 	}
1433 	spin_unlock_irqrestore(&cq->lock, flags);
1434 
1435 	return 0;
1436 }
1437