/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)

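/*
 * Dispatch an asynchronous hardware event to the QP's event handler.
 * A reference is taken on the QP under the xarray lock so it cannot be
 * freed while the handler runs; dropping the last reference completes
 * hr_qp->free for the waiter in hns_roce_qp_free().
 */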
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

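/* Reserve a contiguous, aligned range of QPNs from the QPN bitmap. */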
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
					   base) ? -ENOMEM : 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}

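/*
 * Register a QP in the qp_table xarray, keyed by the low bits of the QPN,
 * and initialize the refcount/completion used for teardown. Despite the
 * name, this is also the final step of the regular allocation path in
 * hns_roce_qp_alloc() below.
 */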
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;
	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
				hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "QPC xa_store failed\n");

	return ret;
}

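/*
 * Allocate the per-QP HEM contexts (QPC, IRRL, and optionally TRRL and
 * SCC context when the hardware supports them), then insert the QP into
 * the xarray. Each step is rolled back on failure.
 */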
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.sccc_entry_sz) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "SCC CTX table get failed\n");
			goto err_put_trrl;
		}
	}

	ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
	if (ret)
		goto err_put_sccc;

	return 0;

err_put_sccc:
	if (hr_dev->caps.sccc_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->sccc_table,
				   hr_qp->qpn);

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	xa_unlock_irqrestore(xa, flags);
}

void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if (hr_qp->ibqp.qp_type != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	}
}

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < hr_dev->caps.reserved_qps)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}

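/*
 * Validate and compute the RQ geometry: the WQE count and SGEs per WQE are
 * rounded up to powers of two and bounded by the device caps. When the QP
 * has no RQ (for example when it is backed by an SRQ), the RQ parameters
 * are zeroed instead.
 */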
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, bool is_user, int has_rq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or SGE error! max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If the QP has no RQ (e.g. an SRQ is attached), zero the RQ parameters */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space must configure max_recv_wr and max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

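/*
 * Validate the SQ geometry requested by user space (log_sq_bb_count and
 * log_sq_stride from the create_qp command) and lay out the SQ, the
 * extended SGE area and the RQ within hr_qp->buff_size, each aligned to
 * the WQE buffer page size.
 */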
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 ex_sge_num;
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	     ucmd->log_sq_stride > max_sq_stride ||
	     ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "SQ size check failed\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ SGE error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));

	if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(hr_dev->dev,
				"extended SGE count is invalid, sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	hr_qp->sge.sge_shift = 4;
	ex_sge_num = hr_qp->sge.sge_cnt;

	/* Get buf size, SQ and RQ are aligned to page_size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->sge.sge_cnt = ex_sge_num ?
		   max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (ex_sge_num) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
		}
	}

	return 0;
}

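/*
 * Split the WQE buffer into SQ, extended-SGE and RQ regions so that each
 * region can be mapped with its own hop number. Returns the number of
 * regions filled in, at most region_max.
 */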
static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *hr_qp,
				struct hns_roce_buf_region *regions,
				int region_max, int page_shift)
{
	int page_size = 1 << page_shift;
	bool is_extend_sge;
	int region_cnt = 0;
	int buf_size;
	int buf_cnt;

	if (hr_qp->buff_size < 1 || region_max < 1)
		return region_cnt;

	if (hr_qp->sge.sge_cnt > 0)
		is_extend_sge = true;
	else
		is_extend_sge = false;

	/* sq region */
	if (is_extend_sge)
		buf_size = hr_qp->sge.offset - hr_qp->sq.offset;
	else
		buf_size = hr_qp->rq.offset - hr_qp->sq.offset;

	if (buf_size > 0 && region_cnt < region_max) {
		buf_cnt = DIV_ROUND_UP(buf_size, page_size);
		hns_roce_init_buf_region(&regions[region_cnt],
					 hr_dev->caps.wqe_sq_hop_num,
					 hr_qp->sq.offset / page_size,
					 buf_cnt);
		region_cnt++;
	}

	/* sge region */
	if (is_extend_sge) {
		buf_size = hr_qp->rq.offset - hr_qp->sge.offset;
		if (buf_size > 0 && region_cnt < region_max) {
			buf_cnt = DIV_ROUND_UP(buf_size, page_size);
			hns_roce_init_buf_region(&regions[region_cnt],
						 hr_dev->caps.wqe_sge_hop_num,
						 hr_qp->sge.offset / page_size,
						 buf_cnt);
			region_cnt++;
		}
	}

	/* rq region */
	buf_size = hr_qp->buff_size - hr_qp->rq.offset;
	if (buf_size > 0) {
		buf_cnt = DIV_ROUND_UP(buf_size, page_size);
		hns_roce_init_buf_region(&regions[region_cnt],
					 hr_dev->caps.wqe_rq_hop_num,
					 hr_qp->rq.offset / page_size,
					 buf_cnt);
		region_cnt++;
	}

	return region_cnt;
}

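/*
 * Pick a base-table page size large enough that the root BA entries of
 * all regions fit in a single BT page; the return value is the extra
 * shift above PAGE_SHIFT.
 */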
static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
				  struct hns_roce_buf_region *regions,
				  int region_cnt)
{
	int bt_pg_shift;
	int ba_num;
	int ret;

	bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz;

	/* all root BA entries must fit in one BT page */
	do {
		ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN;
		ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt,
						     ba_num);
		if (ret <= ba_num)
			break;

		bt_pg_shift++;
	} while (ret > ba_num);

	return bt_pg_shift - PAGE_SHIFT;
}

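/*
 * Compute the SQ geometry for kernel QPs: WQE count, SGEs per WQE and the
 * extended SGE area, then lay out SQ, extended SGEs and RQ within
 * hr_qp->buff_size, each aligned to the WQE buffer page size.
 */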
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR, SGE or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     (hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* UD SQ WQEs place their SGEs in the extended SGE area */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(dev, "extended SGE count is invalid, sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	/* Get buf size, SQ and RQ are aligned to the WQE buffer page size */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.sge_cnt = max(page_size / (1 << hr_qp->sge.sge_shift),
					(u32)hr_qp->sge.sge_cnt);
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Report the usable number of send WRs and SGEs */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

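/*
 * Common QP creation path: size the queues, allocate and map the WQE
 * buffer (user memory when udata is present, a kernel buffer otherwise),
 * set up the record doorbells where supported, reserve a QPN unless a
 * special QPN was supplied, attach the MTR and allocate the hardware
 * contexts.
 */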
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { 0 };
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	struct hns_roce_ib_create_qp_resp resp = {};
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	struct hns_roce_buf_region *r;
	unsigned long qpn = 0;
	u32 page_shift;
	int buf_count;
	int ret;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
				   hns_roce_qp_has_rq(init_attr), hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
	    hns_roce_qp_has_rq(init_attr)) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* Allocate one contiguous SGE array shared by all WQEs */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Point each WQE at its slice of the shared array */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
					  hr_qp->buff_size, 0, 0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}
		hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
				hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
				page_shift);
		ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
					      hr_qp->region_cnt);
		if (ret) {
			dev_err(dev, "alloc buf_list error for create qp\n");
			goto err_alloc_list;
		}

		for (i = 0; i < hr_qp->region_cnt; i++) {
			r = &hr_qp->regions[i];
			buf_count = hns_roce_get_umem_bufs(hr_dev,
					buf_list[i], r->count, r->offset,
					hr_qp->umem, page_shift);
			if (buf_count != r->count) {
				dev_err(dev,
					"get umem buf err, expect %d, ret %d.\n",
					r->count, buf_count);
				ret = -ENOBUFS;
				goto err_get_bufs;
			}
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				dev_err(dev, "sq record doorbell map failed!\n");
				goto err_get_bufs;
			}

			/* indicate kernel supports sq record db */
			resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
			hr_qp->sdb_en = 1;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr,
						   &hr_qp->rdb);
			if (ret) {
				dev_err(dev, "rq record doorbell map failed!\n");
				goto err_sq_dbmap;
			}

			/* indicate kernel supports rq record db */
			resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
			hr_qp->rdb_en = 1;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				dev_err(dev, "rq record doorbell alloc failed!\n");
				goto err_rq_sge_list;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->rdb_en = 1;
		}

		/* Allocate QP buf */
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_db;
		}
		hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
				hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
				page_shift);
		ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
					      hr_qp->region_cnt);
		if (ret) {
			dev_err(dev, "alloc buf_list error for create qp!\n");
			goto err_alloc_list;
		}

		for (i = 0; i < hr_qp->region_cnt; i++) {
			r = &hr_qp->regions[i];
			buf_count = hns_roce_get_kmem_bufs(hr_dev,
					buf_list[i], r->count, r->offset,
					&hr_qp->hr_buf);
			if (buf_count != r->count) {
				dev_err(dev,
					"get kmem buf err, expect %d, ret %d.\n",
					r->count, buf_count);
				ret = -ENOBUFS;
				goto err_get_bufs;
			}
		}

		hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
					 GFP_KERNEL);
		hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
					 GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
							hr_qp->region_cnt);
	hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
			  page_shift);
	ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list,
				  hr_qp->regions, hr_qp->region_cnt);
	if (ret) {
		dev_err(dev, "mtr attach error for create qp\n");
		goto err_mtr;
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* In the v1 engine, the GSI QP context is held in the RoCE
		 * engine's registers.
		 */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_gsi_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_qp;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_qp;
	}

	hr_qp->event = hns_roce_ib_qp_event;
	hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);

	return 0;

err_qp:
	if (init_attr->qp_type == IB_QPT_GSI &&
		hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		hns_roce_qp_remove(hr_dev, hr_qp);
	else
		hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_mtr:
	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);

err_wrid:
	if (udata) {
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr))
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
	}

err_sq_dbmap:
	if (udata)
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr))
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);

err_get_bufs:
	hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);

err_alloc_list:
	if (!hr_qp->umem)
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
	ib_umem_release(hr_qp->umem);

err_db:
	if (!udata && hns_roce_qp_has_rq(init_attr) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (udata) {
			dev_err(dev, "user space GSI QP is not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* In the v1 engine the GSI QP number is derived from the
		 * physical port; later engines use QP number 1.
		 */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "QP type %d is not supported\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->sdb_en == 1) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->rdb_en == 1)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			dev_warn(dev, "flush cqe is not supported in user space!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid, attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid, attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu (%d) invalid while modifying qp\n",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid, attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid, attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->caps.min_wqes) {
			ret = -EPERM;
			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
				new_state);
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

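/*
 * Lock the send and receive CQs in a consistent order (by CQN) so that two
 * paths locking both CQs cannot deadlock; the sparse annotations match
 * hns_roce_unlock_cqs() below.
 */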
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}

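/*
 * Check whether posting nreq more WRs would overflow the work queue. The
 * head/tail snapshot is re-read under the CQ lock before overflow is
 * reported, so a racing poll that frees slots is taken into account.
 */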
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	mutex_init(&qp_table->scc_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed! error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}