/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)

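/*
 * Dispatch an asynchronous hardware event to the QP it belongs to. The QP
 * is looked up by QPN under the qp_table lock and kept referenced while its
 * event handler runs, so it cannot be freed underneath us.
 */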
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

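/*
 * Translate a hardware event type into the corresponding IB event and pass
 * it to the consumer's event handler, if one is registered.
 */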
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

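/* Reserve a contiguous, aligned range of QPNs from the QPN bitmap. */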
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
					   base) ?
		       -ENOMEM :
		       0;
}

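/* Map an IB QP state onto the hardware QP state encoding. */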
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

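/*
 * Register a GSI QP in the QP radix tree. No QPC/IRRL entries are set up
 * here; on hw v1 the GSI QP context lives in the RoCE engine's registers.
 */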
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}

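/*
 * Allocate the per-QP context resources (QPC, IRRL and, when supported,
 * TRRL and SCC context) and insert the QP into the radix tree so that
 * event dispatch and lookups by QPN can find it.
 */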
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.sccc_entry_sz) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "SCC CTX table get failed\n");
			goto err_put_trrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_sccc;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_sccc:
	if (hr_dev->caps.sccc_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->sccc_table,
				   hr_qp->qpn);

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

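/* Remove the QP from the radix tree so it can no longer be looked up. */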
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

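/*
 * Drop the initial reference, wait until all other users of the QP are
 * done and then release its context table entries (GSI QPs have none).
 */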
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		if (hr_dev->caps.sccc_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->sccc_table,
					   hr_qp->qpn);
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

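/*
 * Return a range of QPNs to the bitmap; QPNs reserved for the SQPs are
 * never freed.
 */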
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

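/*
 * Validate the requested RQ capabilities and derive the RQ depth, SGE
 * count and WQE stride. If the QP has no RQ (e.g. it uses an SRQ), the RQ
 * fields are cleared instead.
 */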
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, bool is_user, int has_rq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If the QP has no RQ (e.g. an SRQ is used), clear the RQ fields */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

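/*
 * Derive the SQ and extended SGE layout of a userspace QP from the values
 * passed in the create command, then compute the per-queue offsets inside
 * the WQE buffer.
 */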
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 ex_sge_num;
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	     ucmd->log_sq_stride > max_sq_stride ||
	     ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));

	if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(hr_dev->dev,
				"The extended sge cnt error! sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	hr_qp->sge.sge_shift = 4;
	ex_sge_num = hr_qp->sge.sge_cnt;

	/* Get buf size; SQ and RQ are aligned to the page size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->sge.sge_cnt =
		       max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (ex_sge_num) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
		}
	}

	return 0;
}

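/*
 * Derive the SQ, extended SGE and RQ layout of a kernel QP from the
 * requested capabilities and report the resulting limits back to the
 * caller.
 */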
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR or sge or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     (hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* The SGEs of UD SQ WQEs are carried in the extended SGE space */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	/* Get buf size; SQ and RQ are aligned to page_size */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
					(u32)hr_qp->sge.sge_cnt);
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Report the usable number of send WRs and SGEs back to the caller */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

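/* Every QP type except an XRC target owns a send queue. */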
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return 1;
}

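/*
 * XRC QPs, QPs attached to an SRQ and QPs requesting no receive WRs own
 * no receive queue.
 */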
static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

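/*
 * Common creation path for kernel and userspace QPs: size the queues, set
 * up the WQE buffer and its MTT, map or allocate the record doorbells,
 * reserve a QPN and allocate the QP context.
 */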
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	struct hns_roce_ib_create_qp_resp resp = {};
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
				   hns_roce_qp_has_rq(init_attr), hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
	    hns_roce_qp_has_rq(init_attr)) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* First, allocate one buffer holding the SGE space of all WQEs */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Then point each WQE's sg_list at its slice of that buffer */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
					  hr_qp->buff_size, 0, 0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		page_shift = PAGE_SHIFT;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				 (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift += hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
				    page_shift,
				    &hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(hr_qp->umem),
						page_shift, &hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				dev_err(dev, "sq record doorbell map failed!\n");
				goto err_mtt;
			}

			/* indicate kernel supports sq record db */
			resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
			hr_qp->sdb_en = 1;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr,
						   &hr_qp->rdb);
			if (ret) {
				dev_err(dev, "rq record doorbell map failed!\n");
				goto err_sq_dbmap;
			}

			/* indicate kernel supports rq record db */
			resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
			hr_qp->rdb_en = 1;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				dev_err(dev, "rq record doorbell alloc failed!\n");
				goto err_rq_sge_list;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->rdb_en = 1;
		}

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_db;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
					 GFP_KERNEL);
		hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
					 GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/*
		 * In the v1 engine, the GSI QP context lives in the
		 * RoCE engine's registers.
		 */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_qp;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_qp;
	}

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qp:
	if (init_attr->qp_type == IB_QPT_GSI &&
		hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		hns_roce_qp_remove(hr_dev, hr_qp);
	else
		hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	if (udata) {
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr))
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
	}

err_sq_dbmap:
	if (udata)
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr))
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (hr_qp->umem)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
	if (!udata && hns_roce_qp_has_rq(init_attr) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

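/*
 * Verbs entry point for QP creation. Only RC QPs and kernel-created GSI
 * QPs are supported; any other QP type is rejected.
 */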
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (udata) {
			dev_err(dev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* On hw v1 the GSI QPN is fixed per physical port */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default:{
		dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

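/* Map an IB QP type onto the hardware service type, or -1 if unsupported. */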
int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

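/*
 * Validate a modify-QP request (state transition, port, pkey index, MTU
 * and RDMA atomic limits) and hand it to the hardware-specific modify_qp
 * hook.
 */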
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->sdb_en == 1) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->rdb_en == 1)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			dev_warn(dev, "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->caps.min_wqes) {
			ret = -EPERM;
			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
				new_state);
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

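/*
 * Lock the send and receive CQs of a QP in a consistent order (by CQN) so
 * that two paths locking the same pair of CQs cannot deadlock.
 */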
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

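/* Unlock a CQ pair taken by hns_roce_lock_cqs(). */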
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

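/* Helpers that return pointers into the QP buffer by WQE or SGE index. */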
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

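/*
 * Check whether posting nreq more WRs would overflow the work queue. If
 * the fast-path check fails, the head/tail distance is re-read under the
 * CQ lock before reporting overflow.
 */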
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

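/*
 * Initialize the QP table: the QPN bitmap (with the low QPNs reserved for
 * the special QPs), the radix tree and the locks protecting them.
 */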
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	mutex_init(&qp_table->scc_mutex);
	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* In hw v1, each port has two SQPs; six ports need 12 in total */
	if (hr_dev->caps.max_sq_sg <= 2)
		reserved_from_bot = SQP_NUM;
	else
		reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}