/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)

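/*
 * hns_roce_qp_event() - dispatch an asynchronous hardware event to a QP.
 *
 * Called from the EQ interrupt path. The QP is looked up under
 * qp_table->lock and pinned with a reference so it cannot be freed while
 * the event callback runs; hns_roce_qp_free() waits on &qp->free until
 * the last such reference is dropped here. Illustrative caller (a sketch;
 * the real EQ handlers decode the QPN from the AEQE first):
 *
 *	hns_roce_qp_event(hr_dev, qpn, event_type);
 */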
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

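/*
 * Translate a hardware asynchronous event code into the matching ib_event
 * and deliver it to the consumer's event handler, if one was registered at
 * QP creation. Event types with no IB equivalent are logged at debug level
 * and dropped.
 */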
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

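/*
 * On the v1 engine the GSI QP context lives in RoCE engine registers
 * rather than in QPC/IRRL HEM tables, so allocation only publishes the QP
 * in the radix tree and initializes the refcount/completion pair used by
 * hns_roce_qp_free().
 */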
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		return ret;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;
}

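/*
 * Regular QP allocation: pin the QPC, IRRL and (when the hardware supports
 * it, i.e. caps.trrl_entry_sz != 0) TRRL HEM chunks for this QPN, then
 * publish the QP in the radix tree. Each hns_roce_table_get() is undone in
 * reverse order on failure.
 */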
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

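/*
 * Drop the initial reference, wait until all remaining references (e.g.
 * in-flight async events) are gone, then release the HEM chunks pinned by
 * hns_roce_qp_alloc(). v1 GSI QPs never pinned any, so they are skipped.
 */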
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if (hr_qp->ibqp.qp_type != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

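/*
 * Size the receive queue from the caller's ib_qp_cap. The WQE count is
 * rounded up to a power of two (respecting the device minimum), so e.g.
 * max_recv_wr = 100 with min_wqes = 0 yields rq.wqe_cnt = 128. The values
 * actually chosen are written back into @cap for the caller to see.
 */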
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the requested RQ size against the device capabilities */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or SGE count exceeds caps: max_recv_wr = %d, max_recv_sge = %d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is attached, the RQ is unused and its sizes must be zero */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "QP with SRQ must not set max_recv_wr\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "userspace must set both max_recv_wr and max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "rq.wqe_cnt too large after rounding up\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	/* Report the actual limits back to the caller */
	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

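/*
 * Validate and apply the SQ geometry chosen by userspace (passed in
 * struct hns_roce_ib_create_qp) and lay out the WQE buffer. On hardware
 * with more than two SQ SGEs the layout, each region aligned up to the
 * MTT buffer page size, is:
 *
 *	sq.offset = 0      SQ WQEs
 *	sge.offset         extended SGE space (only if sge_cnt != 0)
 *	rq.offset          RQ WQEs
 */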
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	     ucmd->log_sq_stride > max_sq_stride ||
	     ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "invalid SQ size parameters from userspace\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "max_send_sge (%d) exceeds device cap\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
	hr_qp->sge.sge_shift = 4;

	/* Get buf size; the SQ and RQ are each aligned to page_size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (hr_qp->sge.sge_cnt) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
		}
	}

	return 0;
}

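/*
 * Kernel-QP counterpart of hns_roce_set_user_sq_size(): derive the SQ WQE
 * count and extended SGE space from @cap instead of a user command, using
 * the same SQ / extended-SGE / RQ buffer layout.
 */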
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR, SGE or inline data exceeds device caps\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "sq.wqe_cnt too large after rounding up\n");
		return -EINVAL;
	}

	/* Compute the number of data segments (SGEs) per WQE */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     (hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* UD SQ WQEs place all of their SGEs in the extended SGE area */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	/* Get buf size; each region is aligned to the MTT buffer page size */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Report the actual WR and SGE limits back to the caller */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

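/*
 * Common QP construction for user and kernel QPs. For user QPs the WQE
 * buffer is an ib_umem pinned from the address userspace passed in @udata;
 * for kernel QPs it is allocated with hns_roce_buf_alloc() and wrid arrays
 * are set up for the post_send/post_recv paths. @sqpn is nonzero only for
 * special (GSI) QPs, whose QPN is fixed instead of reserved from the
 * bitmap.
 */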
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		/* allocate RQ inline buffers */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* First, allocate one contiguous SGE array for all WQEs */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		/* Then point each WQE at its slice of that array */
		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
				    page_shift,
				    &hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
				    ib_umem_page_count(hr_qp->umem),
				    hr_qp->umem->page_shift,
				    &hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "BLOCK_MULTICAST_LOOPBACK is not supported\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "IPOIB_UD_LSO is not supported\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* On the v1 engine, the GSI QP context lives in RoCE engine registers */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_gsi_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

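/*
 * ib_device create_qp entry point; only RC and kernel-space GSI QPs are
 * supported. A typical kernel consumer reaches this through the core verbs
 * layer, roughly (sketch of core-layer usage, not driver code):
 *
 *	init_attr.qp_type = IB_QPT_RC;
 *	init_attr.cap.max_send_wr = 128;
 *	qp = ib_create_qp(pd, &init_attr);
 */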
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "userspace GSI QPs are not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* The v1 engine (max_sq_sg <= 2) uses a fixed per-port SQP number */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "unsupported QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

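/*
 * ib_device modify_qp entry point: validate the attribute mask against the
 * device caps and the IB QP state machine, then let the hardware-specific
 * hw->modify_qp() program the QPC. A RESET->RESET transition is a no-op
 * that returns success without touching hardware.
 */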
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "invalid attr->port_num %d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "invalid attr->pkey_index %d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "invalid attr->path_mtu %d\n",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "invalid attr->max_rd_atomic %d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "invalid attr->max_dest_rd_atomic %d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

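/*
 * Lock a QP's send and receive CQs in a global order (lower CQN first, or
 * a single lock when both are the same CQ) so two paths locking the same
 * pair can never deadlock ABBA-style. Always pair with
 * hns_roce_unlock_cqs():
 *
 *	hns_roce_lock_cqs(send_cq, recv_cq);
 *	... remove the QP from both CQs ...
 *	hns_roce_unlock_cqs(send_cq, recv_cq);
 */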
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

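/*
 * Check whether posting @nreq more WRs would overflow the work queue.
 * head and tail are free-running u32 counters, so the unsigned difference
 * head - tail is the number of outstanding WQEs even across wraparound.
 * The CQ lock is taken only for the slow-path recheck, to order the read
 * against completions that advance tail.
 */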
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

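/*
 * Set up the per-device QPN bitmap and the radix tree used for
 * qpn -> hns_roce_qp lookup on the event path. The bottom SQP_NUM QPNs
 * are reserved for the per-port special QPs and are never handed out by
 * hns_roce_reserve_range_qp().
 */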
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port needs two SQPs; six ports reserve SQP_NUM = 12 QPNs */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "QP bitmap init failed, err %d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}