/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)

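/*
 * Dispatch an asynchronous hardware event to the QP it belongs to. The
 * reference taken under qp_table->lock keeps the QP alive while its
 * event handler runs; hns_roce_qp_free() waits on hr_qp->free until the
 * last reference is dropped, so the handler cannot race with destroy.
 */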
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
					   base) ? -ENOMEM : 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		return ret;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;
}

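/*
 * Allocate the per-QP hardware contexts (QPC, IRRL and, when the
 * hardware provides one, TRRL) from their HEM tables, then publish the
 * QP in the radix tree used for qpn -> hns_roce_qp lookups. The error
 * path releases the tables in the reverse order of acquisition.
 */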
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

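/*
 * Drop the initial reference taken in hns_roce_qp_alloc() and wait for
 * any event handlers still holding the QP to finish before returning
 * the hardware contexts. GSI QPs do not own QPC/IRRL/TRRL entries, so
 * only non-GSI QPs put their HEM tables back.
 */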
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

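/*
 * Size the receive queue. WQE counts are rounded up to a power of two
 * so that queue indices can wrap with a simple mask; for example, a
 * request for max_recv_wr = 100 on hardware with min_wqes = 64 yields
 * rq.wqe_cnt = roundup_pow_of_two(max(100, 64)) = 128. The limits
 * actually granted are reported back to the caller through @cap.
 */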
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, bool is_user, int has_rq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or SGE error! max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is attached, the QP needs no RQ of its own */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space must set max_recv_wr and max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

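/*
 * Validate and lay out a userspace-created SQ. On hardware with more
 * than two SQ SGEs the WQE buffer is split into up to three
 * page-aligned regions (SQ WQEs, then extended SGE space, then RQ);
 * otherwise only SQ and RQ regions exist. The offsets computed here are
 * expected to match the layout the userspace provider used when it
 * allocated the buffer at ucmd->buf_addr.
 */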
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 ex_sge_num;
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	     ucmd->log_sq_stride > max_sq_stride ||
	     ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ SGE error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));

	if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(hr_dev->dev,
				"the extended SGE count is invalid! sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	hr_qp->sge.sge_shift = 4;
	ex_sge_num = hr_qp->sge.sge_cnt;

	/* Get buf size, SQ and RQ are aligned to page_size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->sge.sge_cnt =
		       max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (ex_sge_num) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
		}
	}

	return 0;
}

static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR, SGE or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     (hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* UD SQ WQEs place all of their SGEs in the extended SGE space */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(dev, "the extended SGE count is invalid! sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
					(u32)hr_qp->sge.sge_cnt);
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Report the WR and SGE limits for posting sends */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
		return 0;

	return 1;
}

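/*
 * Common QP creation path. With udata the WQE buffer comes from
 * userspace via ib_umem_get() and optional record doorbells are mapped
 * from user memory; without udata the kernel allocates the buffer and
 * doorbells itself. Either way the buffer is described to hardware
 * through an MTT before a QPN is chosen and the QP contexts are set up.
 */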
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	struct hns_roce_ib_create_qp_resp resp = {};
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
				   hns_roce_qp_has_rq(init_attr), hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
	    hns_roce_qp_has_rq(init_attr)) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* First, allocate one contiguous buffer of SGE space */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Then point each WQE's sg_list into that buffer */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
				    page_shift,
				    &hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
				    ib_umem_page_count(hr_qp->umem),
				    hr_qp->umem->page_shift,
				    &hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.sdb_addr, &hr_qp->sdb);
			if (ret) {
				dev_err(dev, "sq record doorbell map failed!\n");
				goto err_mtt;
			}

			/* indicate kernel supports sq record db */
			resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
			hr_qp->sdb_en = 1;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.db_addr, &hr_qp->rdb);
			if (ret) {
				dev_err(dev, "rq record doorbell map failed!\n");
				goto err_sq_dbmap;
			}
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				dev_err(dev, "rq record doorbell alloc failed!\n");
				goto err_rq_sge_list;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->rdb_en = 1;
		}

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_db;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/*
		 * In the v1 engine, the GSI QP context lives in the RoCE
		 * engine's registers.
		 */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_gsi_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	if (udata && (udata->outlen >= sizeof(resp)) &&
		(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {

		/* indicate kernel supports rq record db */
		resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_qp;

		hr_qp->rdb_en = 1;
	}
	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qp:
	if (init_attr->qp_type == IB_QPT_GSI &&
		hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		hns_roce_qp_remove(hr_dev, hr_qp);
	else
		hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	if (udata) {
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
	}

err_sq_dbmap:
	if (udata)
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->sdb);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (hr_qp->umem)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
	if (!udata && hns_roce_qp_has_rq(init_attr) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

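/*
 * Verbs entry point for QP creation. Only RC and GSI QPs are supported.
 * For GSI QPs the QPN is fixed (derived from the physical port on hw
 * v1, QP1 otherwise) and is passed down as sqpn, so nothing is reserved
 * from the QPN bitmap.
 */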
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (udata) {
			dev_err(dev, "userspace GSI QPs are not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* In hw v1 the GSI QPN is derived from the physical port */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "QP type %d is not supported\n",
			init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

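/*
 * Validate a modify-QP request against the generic IB state machine
 * (ib_modify_qp_is_ok()) and this device's limits (port, pkey, MTU and
 * RDMA atomic depths) before handing the transition to the hw-specific
 * modify_qp hook. A RESET->RESET transition is a no-op on hardware
 * without a minimum WQE count and is rejected otherwise.
 */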
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->sdb_en == 1) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
			hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			dev_warn(dev, "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid. attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid. attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu (%d) invalid while modifying qp\n",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid. attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid. attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->caps.min_wqes) {
			ret = -EPERM;
			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
				new_state);
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

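/*
 * Take both CQ locks in a globally consistent order (lowest CQN first)
 * so that two threads locking the same pair of CQs from opposite
 * directions cannot deadlock. The __acquire/__release annotations keep
 * sparse happy when send and receive share a single CQ.
 */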
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

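/*
 * WQE addressing: each queue is a power-of-two array of fixed-size
 * entries inside hr_buf, so entry n of a queue starting at a given
 * offset lives at offset + (n << wqe_shift).
 */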
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

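/*
 * Check whether posting nreq more WRs would overflow the work queue.
 * The first head/tail read is deliberately unlocked as a fast path;
 * only on apparent overflow is the CQ lock taken for a stable view,
 * presumably because completion polling, which advances hr_wq->tail,
 * runs under that lock.
 */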
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* In hw v1, each port includes two SQPs; six ports need 12 in total */
	if (hr_dev->caps.max_sq_sg <= 2)
		reserved_from_bot = SQP_NUM;
	else
		reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed! error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}