1 /*
2  * Copyright (c) 2016-2017 Hisilicon Limited.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/kernel.h>
37 #include <linux/types.h>
38 #include <net/addrconf.h>
39 #include <rdma/ib_addr.h>
40 #include <rdma/ib_cache.h>
41 #include <rdma/ib_umem.h>
42 #include <rdma/uverbs_ioctl.h>
43 
44 #include "hnae3.h"
45 #include "hns_roce_common.h"
46 #include "hns_roce_device.h"
47 #include "hns_roce_cmd.h"
48 #include "hns_roce_hem.h"
49 #include "hns_roce_hw_v2.h"
50 
51 static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
52 			    struct ib_sge *sg)
53 {
54 	dseg->lkey = cpu_to_le32(sg->lkey);
55 	dseg->addr = cpu_to_le64(sg->addr);
56 	dseg->len  = cpu_to_le32(sg->length);
57 }
58 
59 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
60 			 struct hns_roce_wqe_frmr_seg *fseg,
61 			 const struct ib_reg_wr *wr)
62 {
63 	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
64 
65 	/* use ib_access_flags */
66 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
67 		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
68 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
69 		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
70 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
71 		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
72 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
73 		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
74 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
75 		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
76 
77 	/* Data structure reuse may lead to confusion */
78 	rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
79 	rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
80 
81 	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
82 	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
83 	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
84 	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
85 
86 	fseg->pbl_size = cpu_to_le32(mr->pbl_size);
87 	roce_set_field(fseg->mode_buf_pg_sz,
88 		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
89 		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
90 		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
91 	roce_set_bit(fseg->mode_buf_pg_sz,
92 		     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
93 }
94 
95 static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
96 			   const struct ib_atomic_wr *wr)
97 {
98 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
99 		aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
100 		aseg->cmp_data  = cpu_to_le64(wr->compare_add);
101 	} else {
102 		aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
103 		aseg->cmp_data  = 0;
104 	}
105 }
106 
107 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
108 			   unsigned int *sge_ind, int valid_num_sge)
109 {
110 	struct hns_roce_v2_wqe_data_seg *dseg;
111 	struct ib_sge *sg;
112 	int num_in_wqe = 0;
113 	int extend_sge_num;
114 	int fi_sge_num;
115 	int se_sge_num;
116 	int shift;
117 	int i;
118 
119 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
120 		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
121 	extend_sge_num = valid_num_sge - num_in_wqe;
122 	sg = wr->sg_list + num_in_wqe;
123 	shift = qp->hr_buf.page_shift;
124 
125 	/*
126 	 * Check whether wr->num_sge sges are in the same page. If not, we
127 	 * should calculate how many sges in the first page and the second
128 	 * page.
129 	 */
130 	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
131 	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
132 		      (uintptr_t)dseg) /
133 		      sizeof(struct hns_roce_v2_wqe_data_seg);
134 	if (extend_sge_num > fi_sge_num) {
135 		se_sge_num = extend_sge_num - fi_sge_num;
136 		for (i = 0; i < fi_sge_num; i++) {
137 			set_data_seg_v2(dseg++, sg + i);
138 			(*sge_ind)++;
139 		}
140 		dseg = get_send_extend_sge(qp,
141 					   (*sge_ind) & (qp->sge.sge_cnt - 1));
142 		for (i = 0; i < se_sge_num; i++) {
143 			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
144 			(*sge_ind)++;
145 		}
146 	} else {
147 		for (i = 0; i < extend_sge_num; i++) {
148 			set_data_seg_v2(dseg++, sg + i);
149 			(*sge_ind)++;
150 		}
151 	}
152 }
153 
154 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
155 			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
156 			     void *wqe, unsigned int *sge_ind,
157 			     int valid_num_sge,
158 			     const struct ib_send_wr **bad_wr)
159 {
160 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
161 	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
162 	struct hns_roce_qp *qp = to_hr_qp(ibqp);
163 	int j = 0;
164 	int i;
165 
166 	if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
167 		if (le32_to_cpu(rc_sq_wqe->msg_len) >
168 		    hr_dev->caps.max_sq_inline) {
169 			*bad_wr = wr;
170 			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
171 				rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
172 			return -EINVAL;
173 		}
174 
175 		if (wr->opcode == IB_WR_RDMA_READ) {
176 			*bad_wr =  wr;
177 			dev_err(hr_dev->dev, "Not support inline data!\n");
178 			return -EINVAL;
179 		}
180 
181 		for (i = 0; i < wr->num_sge; i++) {
182 			memcpy(wqe, ((void *)wr->sg_list[i].addr),
183 			       wr->sg_list[i].length);
184 			wqe += wr->sg_list[i].length;
185 		}
186 
187 		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
188 			     1);
189 	} else {
190 		if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
191 			for (i = 0; i < wr->num_sge; i++) {
192 				if (likely(wr->sg_list[i].length)) {
193 					set_data_seg_v2(dseg, wr->sg_list + i);
194 					dseg++;
195 				}
196 			}
197 		} else {
198 			roce_set_field(rc_sq_wqe->byte_20,
199 				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
200 				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
201 				     (*sge_ind) & (qp->sge.sge_cnt - 1));
202 
203 			for (i = 0; i < wr->num_sge &&
204 			     j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
205 				if (likely(wr->sg_list[i].length)) {
206 					set_data_seg_v2(dseg, wr->sg_list + i);
207 					dseg++;
208 					j++;
209 				}
210 			}
211 
212 			set_extend_sge(qp, wr, sge_ind, valid_num_sge);
213 		}
214 
215 		roce_set_field(rc_sq_wqe->byte_16,
216 			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
217 			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
218 	}
219 
220 	return 0;
221 }
222 
223 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
224 				 const struct ib_qp_attr *attr,
225 				 int attr_mask, enum ib_qp_state cur_state,
226 				 enum ib_qp_state new_state);
227 
228 static int check_send_valid(struct hns_roce_dev *hr_dev,
229 			    struct hns_roce_qp *hr_qp)
230 {
231 	struct ib_qp *ibqp = &hr_qp->ibqp;
232 	struct device *dev = hr_dev->dev;
233 
234 	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
235 		     ibqp->qp_type != IB_QPT_GSI &&
236 		     ibqp->qp_type != IB_QPT_UD)) {
237 		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
238 		return -EOPNOTSUPP;
239 	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
240 		   hr_qp->state == IB_QPS_INIT ||
241 		   hr_qp->state == IB_QPS_RTR)) {
242 		dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state);
243 		return -EINVAL;
244 	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
245 		dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state);
246 		return -EIO;
247 	}
248 
249 	return 0;
250 }
251 
252 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
253 				 const struct ib_send_wr *wr,
254 				 const struct ib_send_wr **bad_wr)
255 {
256 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
257 	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
258 	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
259 	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
260 	struct hns_roce_qp *qp = to_hr_qp(ibqp);
261 	struct hns_roce_wqe_frmr_seg *fseg;
262 	struct device *dev = hr_dev->dev;
263 	struct hns_roce_v2_db sq_db;
264 	struct ib_qp_attr attr;
265 	unsigned int owner_bit;
266 	unsigned int sge_idx;
267 	unsigned int wqe_idx;
268 	unsigned long flags;
269 	int valid_num_sge;
270 	void *wqe = NULL;
271 	bool loopback;
272 	int attr_mask;
273 	u32 tmp_len;
274 	u32 hr_op;
275 	u8 *smac;
276 	int nreq;
277 	int ret;
278 	int i;
279 
280 	spin_lock_irqsave(&qp->sq.lock, flags);
281 
282 	ret = check_send_valid(hr_dev, qp);
283 	if (ret) {
284 		*bad_wr = wr;
285 		nreq = 0;
286 		goto out;
287 	}
288 
289 	sge_idx = qp->next_sge;
290 
291 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
292 		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
293 			ret = -ENOMEM;
294 			*bad_wr = wr;
295 			goto out;
296 		}
297 
298 		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
299 
300 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
301 			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
302 				wr->num_sge, qp->sq.max_gs);
303 			ret = -EINVAL;
304 			*bad_wr = wr;
305 			goto out;
306 		}
307 
308 		wqe = get_send_wqe(qp, wqe_idx);
309 		qp->sq.wrid[wqe_idx] = wr->wr_id;
310 		owner_bit =
311 		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
312 		valid_num_sge = 0;
313 		tmp_len = 0;
314 
315 		for (i = 0; i < wr->num_sge; i++) {
316 			if (likely(wr->sg_list[i].length)) {
317 				tmp_len += wr->sg_list[i].length;
318 				valid_num_sge++;
319 			}
320 		}
321 
322 		/* Corresponding to the QP type, wqe process separately */
323 		if (ibqp->qp_type == IB_QPT_GSI) {
324 			ud_sq_wqe = wqe;
325 			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
326 
327 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
328 				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
329 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
330 				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
331 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
332 				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
333 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
334 				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
335 			roce_set_field(ud_sq_wqe->byte_48,
336 				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
337 				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
338 				       ah->av.mac[4]);
339 			roce_set_field(ud_sq_wqe->byte_48,
340 				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
341 				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
342 				       ah->av.mac[5]);
343 
344 			/* MAC loopback */
345 			smac = (u8 *)hr_dev->dev_addr[qp->port];
346 			loopback = ether_addr_equal_unaligned(ah->av.mac,
347 							      smac) ? 1 : 0;
348 
349 			roce_set_bit(ud_sq_wqe->byte_40,
350 				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
351 
352 			roce_set_field(ud_sq_wqe->byte_4,
353 				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
354 				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
355 				       HNS_ROCE_V2_WQE_OP_SEND);
356 
357 			ud_sq_wqe->msg_len =
358 			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
359 
360 			switch (wr->opcode) {
361 			case IB_WR_SEND_WITH_IMM:
362 			case IB_WR_RDMA_WRITE_WITH_IMM:
363 				ud_sq_wqe->immtdata =
364 				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
365 				break;
366 			default:
367 				ud_sq_wqe->immtdata = 0;
368 				break;
369 			}
370 
371 			/* Set sig attr */
372 			roce_set_bit(ud_sq_wqe->byte_4,
373 				   V2_UD_SEND_WQE_BYTE_4_CQE_S,
374 				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
375 
376 			/* Set se attr */
377 			roce_set_bit(ud_sq_wqe->byte_4,
378 				  V2_UD_SEND_WQE_BYTE_4_SE_S,
379 				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
380 
381 			roce_set_bit(ud_sq_wqe->byte_4,
382 				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
383 
384 			roce_set_field(ud_sq_wqe->byte_16,
385 				       V2_UD_SEND_WQE_BYTE_16_PD_M,
386 				       V2_UD_SEND_WQE_BYTE_16_PD_S,
387 				       to_hr_pd(ibqp->pd)->pdn);
388 
389 			roce_set_field(ud_sq_wqe->byte_16,
390 				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
391 				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
392 				       valid_num_sge);
393 
394 			roce_set_field(ud_sq_wqe->byte_20,
395 				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
396 				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
397 				     sge_idx & (qp->sge.sge_cnt - 1));
398 
399 			roce_set_field(ud_sq_wqe->byte_24,
400 				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
401 				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
402 			ud_sq_wqe->qkey =
403 			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
404 			     qp->qkey : ud_wr(wr)->remote_qkey);
405 			roce_set_field(ud_sq_wqe->byte_32,
406 				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
407 				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
408 				       ud_wr(wr)->remote_qpn);
409 
410 			roce_set_field(ud_sq_wqe->byte_36,
411 				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
412 				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
413 				       ah->av.vlan_id);
414 			roce_set_field(ud_sq_wqe->byte_36,
415 				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
416 				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
417 				       ah->av.hop_limit);
418 			roce_set_field(ud_sq_wqe->byte_36,
419 				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
420 				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
421 				       ah->av.tclass);
422 			roce_set_field(ud_sq_wqe->byte_40,
423 				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
424 				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
425 				       ah->av.flowlabel);
426 			roce_set_field(ud_sq_wqe->byte_40,
427 				       V2_UD_SEND_WQE_BYTE_40_SL_M,
428 				       V2_UD_SEND_WQE_BYTE_40_SL_S,
429 				       ah->av.sl);
430 			roce_set_field(ud_sq_wqe->byte_40,
431 				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
432 				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
433 				       qp->port);
434 
435 			roce_set_bit(ud_sq_wqe->byte_40,
436 				     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
437 				     ah->av.vlan_en ? 1 : 0);
438 			roce_set_field(ud_sq_wqe->byte_48,
439 				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
440 				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
441 				       hns_get_gid_index(hr_dev, qp->phy_port,
442 							 ah->av.gid_index));
443 
444 			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
445 			       GID_LEN_V2);
446 
447 			set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
448 		} else if (ibqp->qp_type == IB_QPT_RC) {
449 			rc_sq_wqe = wqe;
450 			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
451 
452 			rc_sq_wqe->msg_len =
453 			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
454 
455 			switch (wr->opcode) {
456 			case IB_WR_SEND_WITH_IMM:
457 			case IB_WR_RDMA_WRITE_WITH_IMM:
458 				rc_sq_wqe->immtdata =
459 				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
460 				break;
461 			case IB_WR_SEND_WITH_INV:
462 				rc_sq_wqe->inv_key =
463 					cpu_to_le32(wr->ex.invalidate_rkey);
464 				break;
465 			default:
466 				rc_sq_wqe->immtdata = 0;
467 				break;
468 			}
469 
470 			roce_set_bit(rc_sq_wqe->byte_4,
471 				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
472 				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
473 
474 			roce_set_bit(rc_sq_wqe->byte_4,
475 				  V2_RC_SEND_WQE_BYTE_4_SE_S,
476 				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
477 
478 			roce_set_bit(rc_sq_wqe->byte_4,
479 				   V2_RC_SEND_WQE_BYTE_4_CQE_S,
480 				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
481 
482 			roce_set_bit(rc_sq_wqe->byte_4,
483 				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
484 
485 			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
486 			switch (wr->opcode) {
487 			case IB_WR_RDMA_READ:
488 				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
489 				rc_sq_wqe->rkey =
490 					cpu_to_le32(rdma_wr(wr)->rkey);
491 				rc_sq_wqe->va =
492 					cpu_to_le64(rdma_wr(wr)->remote_addr);
493 				break;
494 			case IB_WR_RDMA_WRITE:
495 				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
496 				rc_sq_wqe->rkey =
497 					cpu_to_le32(rdma_wr(wr)->rkey);
498 				rc_sq_wqe->va =
499 					cpu_to_le64(rdma_wr(wr)->remote_addr);
500 				break;
501 			case IB_WR_RDMA_WRITE_WITH_IMM:
502 				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
503 				rc_sq_wqe->rkey =
504 					cpu_to_le32(rdma_wr(wr)->rkey);
505 				rc_sq_wqe->va =
506 					cpu_to_le64(rdma_wr(wr)->remote_addr);
507 				break;
508 			case IB_WR_SEND:
509 				hr_op = HNS_ROCE_V2_WQE_OP_SEND;
510 				break;
511 			case IB_WR_SEND_WITH_INV:
512 				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
513 				break;
514 			case IB_WR_SEND_WITH_IMM:
515 				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
516 				break;
517 			case IB_WR_LOCAL_INV:
518 				hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
519 				roce_set_bit(rc_sq_wqe->byte_4,
520 					       V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
521 				rc_sq_wqe->inv_key =
522 					    cpu_to_le32(wr->ex.invalidate_rkey);
523 				break;
524 			case IB_WR_REG_MR:
525 				hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
526 				fseg = wqe;
527 				set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
528 				break;
529 			case IB_WR_ATOMIC_CMP_AND_SWP:
530 				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
531 				rc_sq_wqe->rkey =
532 					cpu_to_le32(atomic_wr(wr)->rkey);
533 				rc_sq_wqe->va =
534 					cpu_to_le64(atomic_wr(wr)->remote_addr);
535 				break;
536 			case IB_WR_ATOMIC_FETCH_AND_ADD:
537 				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
538 				rc_sq_wqe->rkey =
539 					cpu_to_le32(atomic_wr(wr)->rkey);
540 				rc_sq_wqe->va =
541 					cpu_to_le64(atomic_wr(wr)->remote_addr);
542 				break;
543 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
544 				hr_op =
545 				       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
546 				break;
547 			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
548 				hr_op =
549 				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
550 				break;
551 			default:
552 				hr_op = HNS_ROCE_V2_WQE_OP_MASK;
553 				break;
554 			}
555 
556 			roce_set_field(rc_sq_wqe->byte_4,
557 				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
558 				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
559 
560 			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
561 			    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
562 				struct hns_roce_v2_wqe_data_seg *dseg;
563 
564 				dseg = wqe;
565 				set_data_seg_v2(dseg, wr->sg_list);
566 				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
567 				set_atomic_seg(wqe, atomic_wr(wr));
568 				roce_set_field(rc_sq_wqe->byte_16,
569 					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
570 					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
571 					       valid_num_sge);
572 			} else if (wr->opcode != IB_WR_REG_MR) {
573 				ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
574 							wqe, &sge_idx,
575 							valid_num_sge, bad_wr);
576 				if (ret)
577 					goto out;
578 			}
579 		} else {
580 			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
581 			spin_unlock_irqrestore(&qp->sq.lock, flags);
582 			*bad_wr = wr;
583 			return -EOPNOTSUPP;
584 		}
585 	}
586 
587 out:
588 	if (likely(nreq)) {
589 		qp->sq.head += nreq;
590 		/* Memory barrier */
591 		wmb();
592 
593 		sq_db.byte_4 = 0;
594 		sq_db.parameter = 0;
595 
596 		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
597 			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
598 		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
599 			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
600 		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
601 			       V2_DB_PARAMETER_IDX_S,
602 			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
603 		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
604 			       V2_DB_PARAMETER_SL_S, qp->sl);
605 
606 		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
607 
608 		qp->next_sge = sge_idx;
609 
610 		if (qp->state == IB_QPS_ERR) {
611 			attr_mask = IB_QP_STATE;
612 			attr.qp_state = IB_QPS_ERR;
613 
614 			ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
615 						    qp->state, IB_QPS_ERR);
616 			if (ret) {
617 				spin_unlock_irqrestore(&qp->sq.lock, flags);
618 				*bad_wr = wr;
619 				return ret;
620 			}
621 		}
622 	}
623 
624 	spin_unlock_irqrestore(&qp->sq.lock, flags);
625 
626 	return ret;
627 }
628 
629 static int check_recv_valid(struct hns_roce_dev *hr_dev,
630 			    struct hns_roce_qp *hr_qp)
631 {
632 	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
633 		return -EIO;
634 	else if (hr_qp->state == IB_QPS_RESET)
635 		return -EINVAL;
636 
637 	return 0;
638 }
639 
640 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
641 				 const struct ib_recv_wr *wr,
642 				 const struct ib_recv_wr **bad_wr)
643 {
644 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
645 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
646 	struct hns_roce_v2_wqe_data_seg *dseg;
647 	struct hns_roce_rinl_sge *sge_list;
648 	struct device *dev = hr_dev->dev;
649 	struct ib_qp_attr attr;
650 	unsigned long flags;
651 	void *wqe = NULL;
652 	int attr_mask;
653 	u32 wqe_idx;
654 	int nreq;
655 	int ret;
656 	int i;
657 
658 	spin_lock_irqsave(&hr_qp->rq.lock, flags);
659 
660 	ret = check_recv_valid(hr_dev, hr_qp);
661 	if (ret) {
662 		*bad_wr = wr;
663 		nreq = 0;
664 		goto out;
665 	}
666 
667 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
668 		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
669 			hr_qp->ibqp.recv_cq)) {
670 			ret = -ENOMEM;
671 			*bad_wr = wr;
672 			goto out;
673 		}
674 
675 		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
676 
677 		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
678 			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
679 				wr->num_sge, hr_qp->rq.max_gs);
680 			ret = -EINVAL;
681 			*bad_wr = wr;
682 			goto out;
683 		}
684 
685 		wqe = get_recv_wqe(hr_qp, wqe_idx);
686 		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
687 		for (i = 0; i < wr->num_sge; i++) {
688 			if (!wr->sg_list[i].length)
689 				continue;
690 			set_data_seg_v2(dseg, wr->sg_list + i);
691 			dseg++;
692 		}
693 
694 		if (i < hr_qp->rq.max_gs) {
695 			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
696 			dseg->addr = 0;
697 		}
698 
699 		/* rq support inline data */
700 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
701 			sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
702 			hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
703 							       (u32)wr->num_sge;
704 			for (i = 0; i < wr->num_sge; i++) {
705 				sge_list[i].addr =
706 					       (void *)(u64)wr->sg_list[i].addr;
707 				sge_list[i].len = wr->sg_list[i].length;
708 			}
709 		}
710 
711 		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
712 	}
713 
714 out:
715 	if (likely(nreq)) {
716 		hr_qp->rq.head += nreq;
717 		/* Memory barrier */
718 		wmb();
719 
720 		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
721 
722 		if (hr_qp->state == IB_QPS_ERR) {
723 			attr_mask = IB_QP_STATE;
724 			attr.qp_state = IB_QPS_ERR;
725 
726 			ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
727 						    attr_mask, hr_qp->state,
728 						    IB_QPS_ERR);
729 			if (ret) {
730 				spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
731 				*bad_wr = wr;
732 				return ret;
733 			}
734 		}
735 	}
736 	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
737 
738 	return ret;
739 }
740 
741 static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
742 				      unsigned long instance_stage,
743 				      unsigned long reset_stage)
744 {
745 	/* When hardware reset has been completed once or more, we should stop
746 	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
747 	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
748 	 * stage of soft reset process, we should exit with error, and then
749 	 * HNAE3_INIT_CLIENT related process can rollback the operation like
750 	 * notifing hardware to free resources, HNAE3_INIT_CLIENT related
751 	 * process will exit with error to notify NIC driver to reschedule soft
752 	 * reset process once again.
753 	 */
754 	hr_dev->is_reset = true;
755 	hr_dev->dis_db = true;
756 
757 	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
758 	    instance_stage == HNS_ROCE_STATE_INIT)
759 		return CMD_RST_PRC_EBUSY;
760 
761 	return CMD_RST_PRC_SUCCESS;
762 }
763 
764 static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
765 					unsigned long instance_stage,
766 					unsigned long reset_stage)
767 {
768 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
769 	struct hnae3_handle *handle = priv->handle;
770 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
771 
772 	/* When hardware reset is detected, we should stop sending mailbox&cmq&
773 	 * doorbell to hardware. If now in .init_instance() function, we should
774 	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
775 	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
776 	 * related process can rollback the operation like notifing hardware to
777 	 * free resources, HNAE3_INIT_CLIENT related process will exit with
778 	 * error to notify NIC driver to reschedule soft reset process once
779 	 * again.
780 	 */
781 	hr_dev->dis_db = true;
782 	if (!ops->get_hw_reset_stat(handle))
783 		hr_dev->is_reset = true;
784 
785 	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
786 	    instance_stage == HNS_ROCE_STATE_INIT)
787 		return CMD_RST_PRC_EBUSY;
788 
789 	return CMD_RST_PRC_SUCCESS;
790 }
791 
792 static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
793 {
794 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
795 	struct hnae3_handle *handle = priv->handle;
796 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
797 
798 	/* When software reset is detected at .init_instance() function, we
799 	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
800 	 * with error.
801 	 */
802 	hr_dev->dis_db = true;
803 	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
804 		hr_dev->is_reset = true;
805 
806 	return CMD_RST_PRC_EBUSY;
807 }
808 
809 static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
810 {
811 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
812 	struct hnae3_handle *handle = priv->handle;
813 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
814 	unsigned long instance_stage;	/* the current instance stage */
815 	unsigned long reset_stage;	/* the current reset stage */
816 	unsigned long reset_cnt;
817 	bool sw_resetting;
818 	bool hw_resetting;
819 
820 	if (hr_dev->is_reset)
821 		return CMD_RST_PRC_SUCCESS;
822 
823 	/* Get information about reset from NIC driver or RoCE driver itself,
824 	 * the meaning of the following variables from NIC driver are described
825 	 * as below:
826 	 * reset_cnt -- The count value of completed hardware reset.
827 	 * hw_resetting -- Whether hardware device is resetting now.
828 	 * sw_resetting -- Whether NIC's software reset process is running now.
829 	 */
830 	instance_stage = handle->rinfo.instance_state;
831 	reset_stage = handle->rinfo.reset_state;
832 	reset_cnt = ops->ae_dev_reset_cnt(handle);
833 	hw_resetting = ops->get_hw_reset_stat(handle);
834 	sw_resetting = ops->ae_dev_resetting(handle);
835 
836 	if (reset_cnt != hr_dev->reset_cnt)
837 		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
838 						  reset_stage);
839 	else if (hw_resetting)
840 		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
841 						    reset_stage);
842 	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
843 		return hns_roce_v2_cmd_sw_resetting(hr_dev);
844 
845 	return 0;
846 }
847 
848 static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
849 {
850 	int ntu = ring->next_to_use;
851 	int ntc = ring->next_to_clean;
852 	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
853 
854 	return ring->desc_num - used - 1;
855 }
856 
857 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
858 				   struct hns_roce_v2_cmq_ring *ring)
859 {
860 	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
861 
862 	ring->desc = kzalloc(size, GFP_KERNEL);
863 	if (!ring->desc)
864 		return -ENOMEM;
865 
866 	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
867 					     DMA_BIDIRECTIONAL);
868 	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
869 		ring->desc_dma_addr = 0;
870 		kfree(ring->desc);
871 		ring->desc = NULL;
872 		return -ENOMEM;
873 	}
874 
875 	return 0;
876 }
877 
878 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
879 				   struct hns_roce_v2_cmq_ring *ring)
880 {
881 	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
882 			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
883 			 DMA_BIDIRECTIONAL);
884 
885 	ring->desc_dma_addr = 0;
886 	kfree(ring->desc);
887 }
888 
889 static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
890 {
891 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
892 	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
893 					    &priv->cmq.csq : &priv->cmq.crq;
894 
895 	ring->flag = ring_type;
896 	ring->next_to_clean = 0;
897 	ring->next_to_use = 0;
898 
899 	return hns_roce_alloc_cmq_desc(hr_dev, ring);
900 }
901 
902 static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
903 {
904 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
905 	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
906 					    &priv->cmq.csq : &priv->cmq.crq;
907 	dma_addr_t dma = ring->desc_dma_addr;
908 
909 	if (ring_type == TYPE_CSQ) {
910 		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
911 		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
912 			   upper_32_bits(dma));
913 		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
914 			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
915 		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
916 		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
917 	} else {
918 		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
919 		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
920 			   upper_32_bits(dma));
921 		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
922 			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
923 		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
924 		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
925 	}
926 }
927 
928 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
929 {
930 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
931 	int ret;
932 
933 	/* Setup the queue entries for command queue */
934 	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
935 	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
936 
937 	/* Setup the lock for command queue */
938 	spin_lock_init(&priv->cmq.csq.lock);
939 	spin_lock_init(&priv->cmq.crq.lock);
940 
941 	/* Setup Tx write back timeout */
942 	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
943 
944 	/* Init CSQ */
945 	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
946 	if (ret) {
947 		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
948 		return ret;
949 	}
950 
951 	/* Init CRQ */
952 	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
953 	if (ret) {
954 		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
955 		goto err_crq;
956 	}
957 
958 	/* Init CSQ REG */
959 	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
960 
961 	/* Init CRQ REG */
962 	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
963 
964 	return 0;
965 
966 err_crq:
967 	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
968 
969 	return ret;
970 }
971 
972 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
973 {
974 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
975 
976 	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
977 	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
978 }
979 
980 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
981 					  enum hns_roce_opcode_type opcode,
982 					  bool is_read)
983 {
984 	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
985 	desc->opcode = cpu_to_le16(opcode);
986 	desc->flag =
987 		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
988 	if (is_read)
989 		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
990 	else
991 		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
992 }
993 
994 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
995 {
996 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
997 	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
998 
999 	return head == priv->cmq.csq.next_to_use;
1000 }
1001 
1002 static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
1003 {
1004 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1005 	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1006 	struct hns_roce_cmq_desc *desc;
1007 	u16 ntc = csq->next_to_clean;
1008 	u32 head;
1009 	int clean = 0;
1010 
1011 	desc = &csq->desc[ntc];
1012 	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
1013 	while (head != ntc) {
1014 		memset(desc, 0, sizeof(*desc));
1015 		ntc++;
1016 		if (ntc == csq->desc_num)
1017 			ntc = 0;
1018 		desc = &csq->desc[ntc];
1019 		clean++;
1020 	}
1021 	csq->next_to_clean = ntc;
1022 
1023 	return clean;
1024 }
1025 
1026 static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1027 			       struct hns_roce_cmq_desc *desc, int num)
1028 {
1029 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1030 	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1031 	struct hns_roce_cmq_desc *desc_to_use;
1032 	bool complete = false;
1033 	u32 timeout = 0;
1034 	int handle = 0;
1035 	u16 desc_ret;
1036 	int ret = 0;
1037 	int ntc;
1038 
1039 	spin_lock_bh(&csq->lock);
1040 
1041 	if (num > hns_roce_cmq_space(csq)) {
1042 		spin_unlock_bh(&csq->lock);
1043 		return -EBUSY;
1044 	}
1045 
1046 	/*
1047 	 * Record the location of desc in the cmq for this time
1048 	 * which will be use for hardware to write back
1049 	 */
1050 	ntc = csq->next_to_use;
1051 
1052 	while (handle < num) {
1053 		desc_to_use = &csq->desc[csq->next_to_use];
1054 		*desc_to_use = desc[handle];
1055 		dev_dbg(hr_dev->dev, "set cmq desc:\n");
1056 		csq->next_to_use++;
1057 		if (csq->next_to_use == csq->desc_num)
1058 			csq->next_to_use = 0;
1059 		handle++;
1060 	}
1061 
1062 	/* Write to hardware */
1063 	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
1064 
1065 	/*
1066 	 * If the command is sync, wait for the firmware to write back,
1067 	 * if multi descriptors to be sent, use the first one to check
1068 	 */
1069 	if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
1070 		do {
1071 			if (hns_roce_cmq_csq_done(hr_dev))
1072 				break;
1073 			udelay(1);
1074 			timeout++;
1075 		} while (timeout < priv->cmq.tx_timeout);
1076 	}
1077 
1078 	if (hns_roce_cmq_csq_done(hr_dev)) {
1079 		complete = true;
1080 		handle = 0;
1081 		while (handle < num) {
1082 			/* get the result of hardware write back */
1083 			desc_to_use = &csq->desc[ntc];
1084 			desc[handle] = *desc_to_use;
1085 			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
1086 			desc_ret = le16_to_cpu(desc[handle].retval);
1087 			if (desc_ret == CMD_EXEC_SUCCESS)
1088 				ret = 0;
1089 			else
1090 				ret = -EIO;
1091 			priv->cmq.last_status = desc_ret;
1092 			ntc++;
1093 			handle++;
1094 			if (ntc == csq->desc_num)
1095 				ntc = 0;
1096 		}
1097 	}
1098 
1099 	if (!complete)
1100 		ret = -EAGAIN;
1101 
1102 	/* clean the command send queue */
1103 	handle = hns_roce_cmq_csq_clean(hr_dev);
1104 	if (handle != num)
1105 		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
1106 			 handle, num);
1107 
1108 	spin_unlock_bh(&csq->lock);
1109 
1110 	return ret;
1111 }
1112 
1113 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1114 			     struct hns_roce_cmq_desc *desc, int num)
1115 {
1116 	int retval;
1117 	int ret;
1118 
1119 	ret = hns_roce_v2_rst_process_cmd(hr_dev);
1120 	if (ret == CMD_RST_PRC_SUCCESS)
1121 		return 0;
1122 	if (ret == CMD_RST_PRC_EBUSY)
1123 		return -EBUSY;
1124 
1125 	ret = __hns_roce_cmq_send(hr_dev, desc, num);
1126 	if (ret) {
1127 		retval = hns_roce_v2_rst_process_cmd(hr_dev);
1128 		if (retval == CMD_RST_PRC_SUCCESS)
1129 			return 0;
1130 		else if (retval == CMD_RST_PRC_EBUSY)
1131 			return -EBUSY;
1132 	}
1133 
1134 	return ret;
1135 }
1136 
1137 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1138 {
1139 	struct hns_roce_query_version *resp;
1140 	struct hns_roce_cmq_desc desc;
1141 	int ret;
1142 
1143 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1144 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1145 	if (ret)
1146 		return ret;
1147 
1148 	resp = (struct hns_roce_query_version *)desc.data;
1149 	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1150 	hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1151 
1152 	return 0;
1153 }
1154 
1155 static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
1156 {
1157 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1158 	struct hnae3_handle *handle = priv->handle;
1159 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1160 	unsigned long reset_cnt;
1161 	bool sw_resetting;
1162 	bool hw_resetting;
1163 
1164 	reset_cnt = ops->ae_dev_reset_cnt(handle);
1165 	hw_resetting = ops->get_hw_reset_stat(handle);
1166 	sw_resetting = ops->ae_dev_resetting(handle);
1167 
1168 	if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
1169 		return true;
1170 
1171 	return false;
1172 }
1173 
1174 static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
1175 				      int flag)
1176 {
1177 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1178 	struct hnae3_handle *handle = priv->handle;
1179 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1180 	unsigned long instance_stage;
1181 	unsigned long reset_cnt;
1182 	unsigned long end;
1183 	bool sw_resetting;
1184 	bool hw_resetting;
1185 
1186 	instance_stage = handle->rinfo.instance_state;
1187 	reset_cnt = ops->ae_dev_reset_cnt(handle);
1188 	hw_resetting = ops->get_hw_reset_stat(handle);
1189 	sw_resetting = ops->ae_dev_resetting(handle);
1190 
1191 	if (reset_cnt != hr_dev->reset_cnt) {
1192 		hr_dev->dis_db = true;
1193 		hr_dev->is_reset = true;
1194 		dev_info(hr_dev->dev, "Func clear success after reset.\n");
1195 	} else if (hw_resetting) {
1196 		hr_dev->dis_db = true;
1197 
1198 		dev_warn(hr_dev->dev,
1199 			 "Func clear is pending, device in resetting state.\n");
1200 		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1201 		while (end) {
1202 			if (!ops->get_hw_reset_stat(handle)) {
1203 				hr_dev->is_reset = true;
1204 				dev_info(hr_dev->dev,
1205 					 "Func clear success after reset.\n");
1206 				return;
1207 			}
1208 			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1209 			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1210 		}
1211 
1212 		dev_warn(hr_dev->dev, "Func clear failed.\n");
1213 	} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
1214 		hr_dev->dis_db = true;
1215 
1216 		dev_warn(hr_dev->dev,
1217 			 "Func clear is pending, device in resetting state.\n");
1218 		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1219 		while (end) {
1220 			if (ops->ae_dev_reset_cnt(handle) !=
1221 			    hr_dev->reset_cnt) {
1222 				hr_dev->is_reset = true;
1223 				dev_info(hr_dev->dev,
1224 					 "Func clear success after sw reset\n");
1225 				return;
1226 			}
1227 			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1228 			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1229 		}
1230 
1231 		dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
1232 	} else {
1233 		if (retval && !flag)
1234 			dev_warn(hr_dev->dev,
1235 				 "Func clear read failed, ret = %d.\n", retval);
1236 
1237 		dev_warn(hr_dev->dev, "Func clear failed.\n");
1238 	}
1239 }
1240 static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1241 {
1242 	bool fclr_write_fail_flag = false;
1243 	struct hns_roce_func_clear *resp;
1244 	struct hns_roce_cmq_desc desc;
1245 	unsigned long end;
1246 	int ret = 0;
1247 
1248 	if (hns_roce_func_clr_chk_rst(hr_dev))
1249 		goto out;
1250 
1251 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1252 	resp = (struct hns_roce_func_clear *)desc.data;
1253 
1254 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1255 	if (ret) {
1256 		fclr_write_fail_flag = true;
1257 		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
1258 			 ret);
1259 		goto out;
1260 	}
1261 
1262 	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1263 	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1264 	while (end) {
1265 		if (hns_roce_func_clr_chk_rst(hr_dev))
1266 			goto out;
1267 		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1268 		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1269 
1270 		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1271 					      true);
1272 
1273 		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1274 		if (ret)
1275 			continue;
1276 
1277 		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
1278 			hr_dev->is_reset = true;
1279 			return;
1280 		}
1281 	}
1282 
1283 out:
1284 	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
1285 }
1286 
1287 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1288 {
1289 	struct hns_roce_query_fw_info *resp;
1290 	struct hns_roce_cmq_desc desc;
1291 	int ret;
1292 
1293 	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1294 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1295 	if (ret)
1296 		return ret;
1297 
1298 	resp = (struct hns_roce_query_fw_info *)desc.data;
1299 	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1300 
1301 	return 0;
1302 }
1303 
1304 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1305 {
1306 	struct hns_roce_cfg_global_param *req;
1307 	struct hns_roce_cmq_desc desc;
1308 
1309 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1310 				      false);
1311 
1312 	req = (struct hns_roce_cfg_global_param *)desc.data;
1313 	memset(req, 0, sizeof(*req));
1314 	roce_set_field(req->time_cfg_udp_port,
1315 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
1316 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
1317 	roce_set_field(req->time_cfg_udp_port,
1318 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
1319 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
1320 
1321 	return hns_roce_cmq_send(hr_dev, &desc, 1);
1322 }
1323 
1324 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1325 {
1326 	struct hns_roce_cmq_desc desc[2];
1327 	struct hns_roce_pf_res_a *req_a;
1328 	struct hns_roce_pf_res_b *req_b;
1329 	int ret;
1330 	int i;
1331 
1332 	for (i = 0; i < 2; i++) {
1333 		hns_roce_cmq_setup_basic_desc(&desc[i],
1334 					      HNS_ROCE_OPC_QUERY_PF_RES, true);
1335 
1336 		if (i == 0)
1337 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1338 		else
1339 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1340 	}
1341 
1342 	ret = hns_roce_cmq_send(hr_dev, desc, 2);
1343 	if (ret)
1344 		return ret;
1345 
1346 	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
1347 	req_b = (struct hns_roce_pf_res_b *)desc[1].data;
1348 
1349 	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
1350 						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
1351 						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
1352 	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
1353 						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
1354 						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
1355 	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
1356 						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
1357 						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
1358 	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
1359 						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
1360 						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
1361 
1362 	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
1363 					     PF_RES_DATA_3_PF_SL_NUM_M,
1364 					     PF_RES_DATA_3_PF_SL_NUM_S);
1365 	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
1366 					     PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
1367 					     PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
1368 
1369 	return 0;
1370 }
1371 
1372 static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
1373 {
1374 	struct hns_roce_pf_timer_res_a *req_a;
1375 	struct hns_roce_cmq_desc desc[2];
1376 	int ret, i;
1377 
1378 	for (i = 0; i < 2; i++) {
1379 		hns_roce_cmq_setup_basic_desc(&desc[i],
1380 					      HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1381 					      true);
1382 
1383 		if (i == 0)
1384 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1385 		else
1386 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1387 	}
1388 
1389 	ret = hns_roce_cmq_send(hr_dev, desc, 2);
1390 	if (ret)
1391 		return ret;
1392 
1393 	req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
1394 
1395 	hr_dev->caps.qpc_timer_bt_num =
1396 				roce_get_field(req_a->qpc_timer_bt_idx_num,
1397 					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
1398 					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
1399 	hr_dev->caps.cqc_timer_bt_num =
1400 				roce_get_field(req_a->cqc_timer_bt_idx_num,
1401 					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
1402 					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
1403 
1404 	return 0;
1405 }
1406 
1407 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
1408 {
1409 	struct hns_roce_cmq_desc desc;
1410 	struct hns_roce_vf_switch *swt;
1411 	int ret;
1412 
1413 	swt = (struct hns_roce_vf_switch *)desc.data;
1414 	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1415 	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1416 	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1417 		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
1418 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1419 	if (ret)
1420 		return ret;
1421 
1422 	desc.flag =
1423 		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
1424 	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1425 	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1426 	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
1427 	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1428 
1429 	return hns_roce_cmq_send(hr_dev, &desc, 1);
1430 }
1431 
1432 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1433 {
1434 	struct hns_roce_cmq_desc desc[2];
1435 	struct hns_roce_vf_res_a *req_a;
1436 	struct hns_roce_vf_res_b *req_b;
1437 	int i;
1438 
1439 	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
1440 	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
1441 	memset(req_a, 0, sizeof(*req_a));
1442 	memset(req_b, 0, sizeof(*req_b));
1443 	for (i = 0; i < 2; i++) {
1444 		hns_roce_cmq_setup_basic_desc(&desc[i],
1445 					      HNS_ROCE_OPC_ALLOC_VF_RES, false);
1446 
1447 		if (i == 0)
1448 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1449 		else
1450 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1451 
1452 		if (i == 0) {
1453 			roce_set_field(req_a->vf_qpc_bt_idx_num,
1454 				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
1455 				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
1456 			roce_set_field(req_a->vf_qpc_bt_idx_num,
1457 				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
1458 				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
1459 				       HNS_ROCE_VF_QPC_BT_NUM);
1460 
1461 			roce_set_field(req_a->vf_srqc_bt_idx_num,
1462 				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
1463 				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
1464 			roce_set_field(req_a->vf_srqc_bt_idx_num,
1465 				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
1466 				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
1467 				       HNS_ROCE_VF_SRQC_BT_NUM);
1468 
1469 			roce_set_field(req_a->vf_cqc_bt_idx_num,
1470 				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
1471 				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
1472 			roce_set_field(req_a->vf_cqc_bt_idx_num,
1473 				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
1474 				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
1475 				       HNS_ROCE_VF_CQC_BT_NUM);
1476 
1477 			roce_set_field(req_a->vf_mpt_bt_idx_num,
1478 				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
1479 				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
1480 			roce_set_field(req_a->vf_mpt_bt_idx_num,
1481 				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
1482 				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
1483 				       HNS_ROCE_VF_MPT_BT_NUM);
1484 
1485 			roce_set_field(req_a->vf_eqc_bt_idx_num,
1486 				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
1487 				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
1488 			roce_set_field(req_a->vf_eqc_bt_idx_num,
1489 				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
1490 				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
1491 				       HNS_ROCE_VF_EQC_NUM);
1492 		} else {
1493 			roce_set_field(req_b->vf_smac_idx_num,
1494 				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
1495 				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
1496 			roce_set_field(req_b->vf_smac_idx_num,
1497 				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
1498 				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
1499 				       HNS_ROCE_VF_SMAC_NUM);
1500 
1501 			roce_set_field(req_b->vf_sgid_idx_num,
1502 				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
1503 				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
1504 			roce_set_field(req_b->vf_sgid_idx_num,
1505 				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
1506 				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
1507 				       HNS_ROCE_VF_SGID_NUM);
1508 
1509 			roce_set_field(req_b->vf_qid_idx_sl_num,
1510 				       VF_RES_B_DATA_3_VF_QID_IDX_M,
1511 				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1512 			roce_set_field(req_b->vf_qid_idx_sl_num,
1513 				       VF_RES_B_DATA_3_VF_SL_NUM_M,
1514 				       VF_RES_B_DATA_3_VF_SL_NUM_S,
1515 				       HNS_ROCE_VF_SL_NUM);
1516 
1517 			roce_set_field(req_b->vf_sccc_idx_num,
1518 				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
1519 				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
1520 			roce_set_field(req_b->vf_sccc_idx_num,
1521 				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
1522 				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
1523 				       HNS_ROCE_VF_SCCC_BT_NUM);
1524 		}
1525 	}
1526 
1527 	return hns_roce_cmq_send(hr_dev, desc, 2);
1528 }
1529 
1530 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1531 {
1532 	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1533 	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1534 	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1535 	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1536 	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
1537 	struct hns_roce_cfg_bt_attr *req;
1538 	struct hns_roce_cmq_desc desc;
1539 
1540 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1541 	req = (struct hns_roce_cfg_bt_attr *)desc.data;
1542 	memset(req, 0, sizeof(*req));
1543 
1544 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1545 		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1546 		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1547 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1548 		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1549 		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1550 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1551 		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1552 		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1553 
1554 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1555 		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1556 		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1557 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1558 		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1559 		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1560 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1561 		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1562 		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1563 
1564 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1565 		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1566 		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1567 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1568 		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1569 		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1570 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1571 		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1572 		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1573 
1574 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1575 		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1576 		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1577 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1578 		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1579 		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1580 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1581 		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1582 		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1583 
1584 	roce_set_field(req->vf_sccc_cfg,
1585 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
1586 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
1587 		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1588 	roce_set_field(req->vf_sccc_cfg,
1589 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
1590 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
1591 		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1592 	roce_set_field(req->vf_sccc_cfg,
1593 		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
1594 		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
1595 		       sccc_hop_num ==
1596 			      HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
1597 
1598 	return hns_roce_cmq_send(hr_dev, &desc, 1);
1599 }
1600 
1601 static void set_default_caps(struct hns_roce_dev *hr_dev)
1602 {
1603 	struct hns_roce_caps *caps = &hr_dev->caps;
1604 
1605 	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
1606 	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
1607 	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
1608 	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
1609 	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
1610 	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
1611 	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1612 	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1613 	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1614 	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
1615 	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
1616 	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
1617 	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
1618 	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
1619 	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1620 	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
1621 	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
1622 	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
1623 	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1624 	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
1625 	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
1626 	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1627 	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1628 	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1629 	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1630 	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1631 	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
1632 	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
1633 	caps->trrl_entry_sz	= HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
1634 	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
1635 	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
1636 	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
1637 	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
1638 	caps->idx_entry_sz	= HNS_ROCE_V2_IDX_ENTRY_SZ;
1639 	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
1640 	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1641 	caps->reserved_lkey	= 0;
1642 	caps->reserved_pds	= 0;
1643 	caps->reserved_mrws	= 1;
1644 	caps->reserved_uars	= 0;
1645 	caps->reserved_cqs	= 0;
1646 	caps->reserved_srqs	= 0;
1647 	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;
1648 
1649 	caps->qpc_ba_pg_sz	= 0;
1650 	caps->qpc_buf_pg_sz	= 0;
1651 	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1652 	caps->srqc_ba_pg_sz	= 0;
1653 	caps->srqc_buf_pg_sz	= 0;
1654 	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1655 	caps->cqc_ba_pg_sz	= 0;
1656 	caps->cqc_buf_pg_sz	= 0;
1657 	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1658 	caps->mpt_ba_pg_sz	= 0;
1659 	caps->mpt_buf_pg_sz	= 0;
1660 	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1661 	caps->mtt_ba_pg_sz	= 0;
1662 	caps->mtt_buf_pg_sz	= 0;
1663 	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
1664 	caps->wqe_sq_hop_num	= HNS_ROCE_SQWQE_HOP_NUM;
1665 	caps->wqe_sge_hop_num	= HNS_ROCE_EXT_SGE_HOP_NUM;
1666 	caps->wqe_rq_hop_num	= HNS_ROCE_RQWQE_HOP_NUM;
1667 	caps->cqe_ba_pg_sz	= HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
1668 	caps->cqe_buf_pg_sz	= 0;
1669 	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
1670 	caps->srqwqe_ba_pg_sz	= 0;
1671 	caps->srqwqe_buf_pg_sz	= 0;
1672 	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
1673 	caps->idx_ba_pg_sz	= 0;
1674 	caps->idx_buf_pg_sz	= 0;
1675 	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
1676 	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1677 
1678 	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
1679 				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1680 				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
1681 				  HNS_ROCE_CAP_FLAG_RECORD_DB |
1682 				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1683 
1684 	caps->pkey_table_len[0] = 1;
1685 	caps->gid_table_len[0]	= HNS_ROCE_V2_GID_INDEX_NUM;
1686 	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
1687 	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
1688 	caps->local_ca_ack_delay = 0;
1689 	caps->max_mtu = IB_MTU_4096;
1690 
1691 	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
1692 	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;
1693 
1694 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
1695 		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
1696 			       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
1697 			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
1698 
1699 		caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1700 		caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1701 		caps->qpc_timer_ba_pg_sz  = 0;
1702 		caps->qpc_timer_buf_pg_sz = 0;
1703 		caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1704 		caps->num_cqc_timer	  = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1705 		caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1706 		caps->cqc_timer_ba_pg_sz  = 0;
1707 		caps->cqc_timer_buf_pg_sz = 0;
1708 		caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1709 
1710 		caps->sccc_entry_sz	  = HNS_ROCE_V2_SCCC_ENTRY_SZ;
1711 		caps->sccc_ba_pg_sz	  = 0;
1712 		caps->sccc_buf_pg_sz	  = 0;
1713 		caps->sccc_hop_num	  = HNS_ROCE_SCCC_HOP_NUM;
1714 	}
1715 }
1716 
1717 static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
1718 		       int *buf_page_size, int *bt_page_size, u32 hem_type)
1719 {
1720 	u64 obj_per_chunk;
1721 	int bt_chunk_size = 1 << PAGE_SHIFT;
1722 	int buf_chunk_size = 1 << PAGE_SHIFT;
1723 	int obj_per_chunk_default = buf_chunk_size / obj_size;
1724 
1725 	*buf_page_size = 0;
1726 	*bt_page_size = 0;
1727 
1728 	switch (hop_num) {
1729 	case 3:
1730 		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1731 				(bt_chunk_size / BA_BYTE_LEN) *
1732 				(bt_chunk_size / BA_BYTE_LEN) *
1733 				 obj_per_chunk_default;
1734 		break;
1735 	case 2:
1736 		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1737 				(bt_chunk_size / BA_BYTE_LEN) *
1738 				 obj_per_chunk_default;
1739 		break;
1740 	case 1:
1741 		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1742 				obj_per_chunk_default;
1743 		break;
1744 	case HNS_ROCE_HOP_NUM_0:
1745 		obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
1746 		break;
1747 	default:
1748 		pr_err("Table %d not support hop_num = %d!\n", hem_type,
1749 			hop_num);
1750 		return;
1751 	}
1752 
1753 	if (hem_type >= HEM_TYPE_MTT)
1754 		*bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1755 	else
1756 		*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1757 }
1758 
1759 static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
1760 {
1761 	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
1762 	struct hns_roce_caps *caps = &hr_dev->caps;
1763 	struct hns_roce_query_pf_caps_a *resp_a;
1764 	struct hns_roce_query_pf_caps_b *resp_b;
1765 	struct hns_roce_query_pf_caps_c *resp_c;
1766 	struct hns_roce_query_pf_caps_d *resp_d;
1767 	struct hns_roce_query_pf_caps_e *resp_e;
1768 	int ctx_hop_num;
1769 	int pbl_hop_num;
1770 	int ret;
1771 	int i;
1772 
1773 	for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
1774 		hns_roce_cmq_setup_basic_desc(&desc[i],
1775 					      HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
1776 					      true);
1777 		if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
1778 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1779 		else
1780 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1781 	}
1782 
1783 	ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
1784 	if (ret)
1785 		return ret;
1786 
1787 	resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
1788 	resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
1789 	resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
1790 	resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
1791 	resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
1792 
1793 	caps->local_ca_ack_delay     = resp_a->local_ca_ack_delay;
1794 	caps->max_sq_sg		     = le16_to_cpu(resp_a->max_sq_sg);
1795 	caps->max_sq_inline	     = le16_to_cpu(resp_a->max_sq_inline);
1796 	caps->max_rq_sg		     = le16_to_cpu(resp_a->max_rq_sg);
1797 	caps->max_extend_sg	     = le32_to_cpu(resp_a->max_extend_sg);
1798 	caps->num_qpc_timer	     = le16_to_cpu(resp_a->num_qpc_timer);
1799 	caps->num_cqc_timer	     = le16_to_cpu(resp_a->num_cqc_timer);
1800 	caps->max_srq_sges	     = le16_to_cpu(resp_a->max_srq_sges);
1801 	caps->num_aeq_vectors	     = resp_a->num_aeq_vectors;
1802 	caps->num_other_vectors	     = resp_a->num_other_vectors;
1803 	caps->max_sq_desc_sz	     = resp_a->max_sq_desc_sz;
1804 	caps->max_rq_desc_sz	     = resp_a->max_rq_desc_sz;
1805 	caps->max_srq_desc_sz	     = resp_a->max_srq_desc_sz;
1806 	caps->cq_entry_sz	     = resp_a->cq_entry_sz;
1807 
1808 	caps->mtpt_entry_sz	     = resp_b->mtpt_entry_sz;
1809 	caps->irrl_entry_sz	     = resp_b->irrl_entry_sz;
1810 	caps->trrl_entry_sz	     = resp_b->trrl_entry_sz;
1811 	caps->cqc_entry_sz	     = resp_b->cqc_entry_sz;
1812 	caps->srqc_entry_sz	     = resp_b->srqc_entry_sz;
1813 	caps->idx_entry_sz	     = resp_b->idx_entry_sz;
1814 	caps->sccc_entry_sz	     = resp_b->scc_ctx_entry_sz;
1815 	caps->max_mtu		     = resp_b->max_mtu;
1816 	caps->qpc_entry_sz	     = le16_to_cpu(resp_b->qpc_entry_sz);
1817 	caps->min_cqes		     = resp_b->min_cqes;
1818 	caps->min_wqes		     = resp_b->min_wqes;
1819 	caps->page_size_cap	     = le32_to_cpu(resp_b->page_size_cap);
1820 	caps->pkey_table_len[0]	     = resp_b->pkey_table_len;
1821 	caps->phy_num_uars	     = resp_b->phy_num_uars;
1822 	ctx_hop_num		     = resp_b->ctx_hop_num;
1823 	pbl_hop_num		     = resp_b->pbl_hop_num;
1824 
1825 	caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
1826 					    V2_QUERY_PF_CAPS_C_NUM_PDS_M,
1827 					    V2_QUERY_PF_CAPS_C_NUM_PDS_S);
1828 	caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
1829 				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
1830 				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
1831 	caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
1832 					    V2_QUERY_PF_CAPS_C_NUM_CQS_M,
1833 					    V2_QUERY_PF_CAPS_C_NUM_CQS_S);
1834 	caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
1835 						V2_QUERY_PF_CAPS_C_MAX_GID_M,
1836 						V2_QUERY_PF_CAPS_C_MAX_GID_S);
1837 	caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
1838 					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
1839 					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
1840 	caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
1841 					      V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
1842 					      V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
1843 	caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
1844 					    V2_QUERY_PF_CAPS_C_NUM_QPS_M,
1845 					    V2_QUERY_PF_CAPS_C_NUM_QPS_S);
1846 	caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
1847 						V2_QUERY_PF_CAPS_C_MAX_ORD_M,
1848 						V2_QUERY_PF_CAPS_C_MAX_ORD_S);
1849 	caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
1850 	caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
1851 	caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
1852 					     V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
1853 					     V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
1854 	caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
1855 	caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
1856 					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
1857 					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
1858 	caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
1859 						V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
1860 						V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
1861 	caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
1862 					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
1863 					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
1864 	caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
1865 					    V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
1866 					    V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
1867 	caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
1868 					    V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
1869 					    V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
1870 	caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
1871 					    V2_QUERY_PF_CAPS_D_RSV_PDS_M,
1872 					    V2_QUERY_PF_CAPS_D_RSV_PDS_S);
1873 	caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
1874 					     V2_QUERY_PF_CAPS_D_NUM_UARS_M,
1875 					     V2_QUERY_PF_CAPS_D_NUM_UARS_S);
1876 	caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
1877 					    V2_QUERY_PF_CAPS_D_RSV_QPS_M,
1878 					    V2_QUERY_PF_CAPS_D_RSV_QPS_S);
1879 	caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
1880 					     V2_QUERY_PF_CAPS_D_RSV_UARS_M,
1881 					     V2_QUERY_PF_CAPS_D_RSV_UARS_S);
1882 	caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
1883 					     V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
1884 					     V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
1885 	caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
1886 					 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
1887 					 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
1888 	caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
1889 					    V2_QUERY_PF_CAPS_E_RSV_CQS_M,
1890 					    V2_QUERY_PF_CAPS_E_RSV_CQS_S);
1891 	caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
1892 					     V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
1893 					     V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
1894 	caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
1895 					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
1896 					     V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
1897 	caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
1898 	caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
1899 	caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
1900 	caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
1901 
1902 	caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1903 	caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1904 	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1905 	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1906 	caps->mtt_ba_pg_sz = 0;
1907 	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1908 	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1909 	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
1910 
1911 	caps->qpc_hop_num = ctx_hop_num;
1912 	caps->srqc_hop_num = ctx_hop_num;
1913 	caps->cqc_hop_num = ctx_hop_num;
1914 	caps->mpt_hop_num = ctx_hop_num;
1915 	caps->mtt_hop_num = pbl_hop_num;
1916 	caps->cqe_hop_num = pbl_hop_num;
1917 	caps->srqwqe_hop_num = pbl_hop_num;
1918 	caps->idx_hop_num = pbl_hop_num;
1919 	caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1920 					  V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
1921 					  V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
1922 	caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1923 					  V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
1924 					  V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
1925 	caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1926 					  V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
1927 					  V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
1928 
1929 	calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
1930 		   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
1931 		   HEM_TYPE_QPC);
1932 	calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
1933 		   caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
1934 		   HEM_TYPE_MTPT);
1935 	calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
1936 		   caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
1937 		   HEM_TYPE_CQC);
1938 	calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
1939 		   caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
1940 		   &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
1941 
1942 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
1943 		caps->sccc_hop_num = ctx_hop_num;
1944 		caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1945 		caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1946 
1947 		calc_pg_sz(caps->num_qps, caps->sccc_entry_sz,
1948 			   caps->sccc_hop_num, caps->sccc_bt_num,
1949 			   &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
1950 			   HEM_TYPE_SCCC);
1951 		calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
1952 			   caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
1953 			   &caps->cqc_timer_buf_pg_sz,
1954 			   &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
1955 	}
1956 
1957 	calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
1958 		   1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
1959 	calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
1960 		   caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
1961 		   &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
1962 	calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
1963 		   1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
1964 
1965 	return 0;
1966 }
1967 
1968 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1969 {
1970 	struct hns_roce_caps *caps = &hr_dev->caps;
1971 	int ret;
1972 
1973 	ret = hns_roce_cmq_query_hw_info(hr_dev);
1974 	if (ret) {
1975 		dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
1976 			ret);
1977 		return ret;
1978 	}
1979 
1980 	ret = hns_roce_query_fw_ver(hr_dev);
1981 	if (ret) {
1982 		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
1983 			ret);
1984 		return ret;
1985 	}
1986 
1987 	ret = hns_roce_config_global_param(hr_dev);
1988 	if (ret) {
1989 		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1990 			ret);
1991 		return ret;
1992 	}
1993 
1994 	/* Get pf resource owned by every pf */
1995 	ret = hns_roce_query_pf_resource(hr_dev);
1996 	if (ret) {
1997 		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1998 			ret);
1999 		return ret;
2000 	}
2001 
2002 	if (hr_dev->pci_dev->revision == 0x21) {
2003 		ret = hns_roce_query_pf_timer_resource(hr_dev);
2004 		if (ret) {
2005 			dev_err(hr_dev->dev,
2006 				"Query pf timer resource fail, ret = %d.\n",
2007 				ret);
2008 			return ret;
2009 		}
2010 	}
2011 
2012 	ret = hns_roce_alloc_vf_resource(hr_dev);
2013 	if (ret) {
2014 		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
2015 			ret);
2016 		return ret;
2017 	}
2018 
2019 	if (hr_dev->pci_dev->revision == 0x21) {
2020 		ret = hns_roce_set_vf_switch_param(hr_dev, 0);
2021 		if (ret) {
2022 			dev_err(hr_dev->dev,
2023 				"Set function switch param fail, ret = %d.\n",
2024 				ret);
2025 			return ret;
2026 		}
2027 	}
2028 
2029 	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2030 	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2031 
2032 	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
2033 	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
2034 	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2035 	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
2036 
2037 	caps->pbl_ba_pg_sz	= HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2038 	caps->pbl_buf_pg_sz	= 0;
2039 	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
2040 	caps->eqe_ba_pg_sz	= 0;
2041 	caps->eqe_buf_pg_sz	= 0;
2042 	caps->eqe_hop_num	= HNS_ROCE_EQE_HOP_NUM;
2043 	caps->tsq_buf_pg_sz	= 0;
2044 
2045 	ret = hns_roce_query_pf_caps(hr_dev);
2046 	if (ret)
2047 		set_default_caps(hr_dev);
2048 
2049 	ret = hns_roce_v2_set_bt(hr_dev);
2050 	if (ret)
2051 		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
2052 			ret);
2053 
2054 	return ret;
2055 }
2056 
2057 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
2058 				      enum hns_roce_link_table_type type)
2059 {
2060 	struct hns_roce_cmq_desc desc[2];
2061 	struct hns_roce_cfg_llm_a *req_a =
2062 				(struct hns_roce_cfg_llm_a *)desc[0].data;
2063 	struct hns_roce_cfg_llm_b *req_b =
2064 				(struct hns_roce_cfg_llm_b *)desc[1].data;
2065 	struct hns_roce_v2_priv *priv = hr_dev->priv;
2066 	struct hns_roce_link_table *link_tbl;
2067 	struct hns_roce_link_table_entry *entry;
2068 	enum hns_roce_opcode_type opcode;
2069 	u32 page_num;
2070 	int i;
2071 
2072 	switch (type) {
2073 	case TSQ_LINK_TABLE:
2074 		link_tbl = &priv->tsq;
2075 		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2076 		break;
2077 	case TPQ_LINK_TABLE:
2078 		link_tbl = &priv->tpq;
2079 		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
2080 		break;
2081 	default:
2082 		return -EINVAL;
2083 	}
2084 
2085 	page_num = link_tbl->npages;
2086 	entry = link_tbl->table.buf;
2087 	memset(req_a, 0, sizeof(*req_a));
2088 	memset(req_b, 0, sizeof(*req_b));
2089 
2090 	for (i = 0; i < 2; i++) {
2091 		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
2092 
2093 		if (i == 0)
2094 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2095 		else
2096 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2097 
2098 		if (i == 0) {
2099 			req_a->base_addr_l =
2100 				cpu_to_le32(link_tbl->table.map & 0xffffffff);
2101 			req_a->base_addr_h =
2102 				cpu_to_le32(link_tbl->table.map >> 32);
2103 			roce_set_field(req_a->depth_pgsz_init_en,
2104 				       CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
2105 				       link_tbl->npages);
2106 			roce_set_field(req_a->depth_pgsz_init_en,
2107 				       CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
2108 				       link_tbl->pg_sz);
2109 			req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
2110 			req_a->head_ba_h_nxtptr =
2111 				cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
2112 			roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
2113 				       CFG_LLM_HEAD_PTR_S, 0);
2114 		} else {
2115 			req_b->tail_ba_l =
2116 				cpu_to_le32(entry[page_num - 1].blk_ba0);
2117 			roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
2118 				       CFG_LLM_TAIL_BA_H_S,
2119 				       entry[page_num - 1].blk_ba1_nxt_ptr &
2120 					       HNS_ROCE_LINK_TABLE_BA1_M);
2121 			roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
2122 				       CFG_LLM_TAIL_PTR_S,
2123 				       (entry[page_num - 2].blk_ba1_nxt_ptr &
2124 					HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
2125 					       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
2126 		}
2127 	}
2128 	roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
2129 		       CFG_LLM_INIT_EN_S, 1);
2130 
2131 	return hns_roce_cmq_send(hr_dev, desc, 2);
2132 }
2133 
2134 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
2135 				    enum hns_roce_link_table_type type)
2136 {
2137 	struct hns_roce_v2_priv *priv = hr_dev->priv;
2138 	struct hns_roce_link_table *link_tbl;
2139 	struct hns_roce_link_table_entry *entry;
2140 	struct device *dev = hr_dev->dev;
2141 	u32 buf_chk_sz;
2142 	dma_addr_t t;
2143 	int func_num = 1;
2144 	int pg_num_a;
2145 	int pg_num_b;
2146 	int pg_num;
2147 	int size;
2148 	int i;
2149 
2150 	switch (type) {
2151 	case TSQ_LINK_TABLE:
2152 		link_tbl = &priv->tsq;
2153 		buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
2154 		pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
2155 		pg_num_b = hr_dev->caps.sl_num * 4 + 2;
2156 		break;
2157 	case TPQ_LINK_TABLE:
2158 		link_tbl = &priv->tpq;
2159 		buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz +	PAGE_SHIFT);
2160 		pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
2161 		pg_num_b = 2 * 4 * func_num + 2;
2162 		break;
2163 	default:
2164 		return -EINVAL;
2165 	}
2166 
2167 	pg_num = max(pg_num_a, pg_num_b);
2168 	size = pg_num * sizeof(struct hns_roce_link_table_entry);
2169 
2170 	link_tbl->table.buf = dma_alloc_coherent(dev, size,
2171 						 &link_tbl->table.map,
2172 						 GFP_KERNEL);
2173 	if (!link_tbl->table.buf)
2174 		goto out;
2175 
2176 	link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
2177 				    GFP_KERNEL);
2178 	if (!link_tbl->pg_list)
2179 		goto err_kcalloc_failed;
2180 
2181 	entry = link_tbl->table.buf;
2182 	for (i = 0; i < pg_num; ++i) {
2183 		link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
2184 							      &t, GFP_KERNEL);
2185 		if (!link_tbl->pg_list[i].buf)
2186 			goto err_alloc_buf_failed;
2187 
2188 		link_tbl->pg_list[i].map = t;
2189 
2190 		entry[i].blk_ba0 = (u32)(t >> 12);
2191 		entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
2192 
2193 		if (i < (pg_num - 1))
2194 			entry[i].blk_ba1_nxt_ptr |=
2195 				(i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
2196 
2197 	}
2198 	link_tbl->npages = pg_num;
2199 	link_tbl->pg_sz = buf_chk_sz;
2200 
2201 	return hns_roce_config_link_table(hr_dev, type);
2202 
2203 err_alloc_buf_failed:
2204 	for (i -= 1; i >= 0; i--)
2205 		dma_free_coherent(dev, buf_chk_sz,
2206 				  link_tbl->pg_list[i].buf,
2207 				  link_tbl->pg_list[i].map);
2208 	kfree(link_tbl->pg_list);
2209 
2210 err_kcalloc_failed:
2211 	dma_free_coherent(dev, size, link_tbl->table.buf,
2212 			  link_tbl->table.map);
2213 
2214 out:
2215 	return -ENOMEM;
2216 }
2217 
2218 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
2219 				     struct hns_roce_link_table *link_tbl)
2220 {
2221 	struct device *dev = hr_dev->dev;
2222 	int size;
2223 	int i;
2224 
2225 	size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
2226 
2227 	for (i = 0; i < link_tbl->npages; ++i)
2228 		if (link_tbl->pg_list[i].buf)
2229 			dma_free_coherent(dev, link_tbl->pg_sz,
2230 					  link_tbl->pg_list[i].buf,
2231 					  link_tbl->pg_list[i].map);
2232 	kfree(link_tbl->pg_list);
2233 
2234 	dma_free_coherent(dev, size, link_tbl->table.buf,
2235 			  link_tbl->table.map);
2236 }
2237 
2238 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2239 {
2240 	struct hns_roce_v2_priv *priv = hr_dev->priv;
2241 	int qpc_count, cqc_count;
2242 	int ret, i;
2243 
2244 	/* TSQ includes SQ doorbell and ack doorbell */
2245 	ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
2246 	if (ret) {
2247 		dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
2248 		return ret;
2249 	}
2250 
2251 	ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
2252 	if (ret) {
2253 		dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
2254 		goto err_tpq_init_failed;
2255 	}
2256 
2257 	/* Alloc memory for QPC Timer buffer space chunk */
2258 	for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2259 	     qpc_count++) {
2260 		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2261 					 qpc_count);
2262 		if (ret) {
2263 			dev_err(hr_dev->dev, "QPC Timer get failed\n");
2264 			goto err_qpc_timer_failed;
2265 		}
2266 	}
2267 
2268 	/* Alloc memory for CQC Timer buffer space chunk */
2269 	for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2270 	     cqc_count++) {
2271 		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2272 					 cqc_count);
2273 		if (ret) {
2274 			dev_err(hr_dev->dev, "CQC Timer get failed\n");
2275 			goto err_cqc_timer_failed;
2276 		}
2277 	}
2278 
2279 	return 0;
2280 
2281 err_cqc_timer_failed:
2282 	for (i = 0; i < cqc_count; i++)
2283 		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2284 
2285 err_qpc_timer_failed:
2286 	for (i = 0; i < qpc_count; i++)
2287 		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2288 
2289 	hns_roce_free_link_table(hr_dev, &priv->tpq);
2290 
2291 err_tpq_init_failed:
2292 	hns_roce_free_link_table(hr_dev, &priv->tsq);
2293 
2294 	return ret;
2295 }
2296 
2297 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2298 {
2299 	struct hns_roce_v2_priv *priv = hr_dev->priv;
2300 
2301 	if (hr_dev->pci_dev->revision == 0x21)
2302 		hns_roce_function_clear(hr_dev);
2303 
2304 	hns_roce_free_link_table(hr_dev, &priv->tpq);
2305 	hns_roce_free_link_table(hr_dev, &priv->tsq);
2306 }
2307 
2308 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2309 {
2310 	struct hns_roce_cmq_desc desc;
2311 	struct hns_roce_mbox_status *mb_st =
2312 				       (struct hns_roce_mbox_status *)desc.data;
2313 	enum hns_roce_cmd_return_status status;
2314 
2315 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2316 
2317 	status = hns_roce_cmq_send(hr_dev, &desc, 1);
2318 	if (status)
2319 		return status;
2320 
2321 	return le32_to_cpu(mb_st->mb_status_hw_run);
2322 }
2323 
2324 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2325 {
2326 	u32 status = hns_roce_query_mbox_status(hr_dev);
2327 
2328 	return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2329 }
2330 
2331 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2332 {
2333 	u32 status = hns_roce_query_mbox_status(hr_dev);
2334 
2335 	return status & HNS_ROCE_HW_MB_STATUS_MASK;
2336 }
2337 
2338 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2339 			      u64 out_param, u32 in_modifier, u8 op_modifier,
2340 			      u16 op, u16 token, int event)
2341 {
2342 	struct hns_roce_cmq_desc desc;
2343 	struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2344 
2345 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2346 
2347 	mb->in_param_l = cpu_to_le32(in_param);
2348 	mb->in_param_h = cpu_to_le32(in_param >> 32);
2349 	mb->out_param_l = cpu_to_le32(out_param);
2350 	mb->out_param_h = cpu_to_le32(out_param >> 32);
2351 	mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2352 	mb->token_event_en = cpu_to_le32(event << 16 | token);
2353 
2354 	return hns_roce_cmq_send(hr_dev, &desc, 1);
2355 }
2356 
2357 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2358 				 u64 out_param, u32 in_modifier, u8 op_modifier,
2359 				 u16 op, u16 token, int event)
2360 {
2361 	struct device *dev = hr_dev->dev;
2362 	unsigned long end;
2363 	int ret;
2364 
2365 	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2366 	while (hns_roce_v2_cmd_pending(hr_dev)) {
2367 		if (time_after(jiffies, end)) {
2368 			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2369 				(int)end);
2370 			return -EAGAIN;
2371 		}
2372 		cond_resched();
2373 	}
2374 
2375 	ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2376 				 op_modifier, op, token, event);
2377 	if (ret)
2378 		dev_err(dev, "Post mailbox fail(%d)\n", ret);
2379 
2380 	return ret;
2381 }
2382 
2383 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2384 				unsigned long timeout)
2385 {
2386 	struct device *dev = hr_dev->dev;
2387 	unsigned long end;
2388 	u32 status;
2389 
2390 	end = msecs_to_jiffies(timeout) + jiffies;
2391 	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2392 		cond_resched();
2393 
2394 	if (hns_roce_v2_cmd_pending(hr_dev)) {
2395 		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
2396 		return -ETIMEDOUT;
2397 	}
2398 
2399 	status = hns_roce_v2_cmd_complete(hr_dev);
2400 	if (status != 0x1) {
2401 		if (status == CMD_RST_PRC_EBUSY)
2402 			return status;
2403 
2404 		dev_err(dev, "mailbox status 0x%x!\n", status);
2405 		return -EBUSY;
2406 	}
2407 
2408 	return 0;
2409 }
2410 
2411 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2412 				      int gid_index, const union ib_gid *gid,
2413 				      enum hns_roce_sgid_type sgid_type)
2414 {
2415 	struct hns_roce_cmq_desc desc;
2416 	struct hns_roce_cfg_sgid_tb *sgid_tb =
2417 				    (struct hns_roce_cfg_sgid_tb *)desc.data;
2418 	u32 *p;
2419 
2420 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2421 
2422 	roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2423 		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
2424 	roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2425 		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2426 
2427 	p = (u32 *)&gid->raw[0];
2428 	sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2429 
2430 	p = (u32 *)&gid->raw[4];
2431 	sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2432 
2433 	p = (u32 *)&gid->raw[8];
2434 	sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2435 
2436 	p = (u32 *)&gid->raw[0xc];
2437 	sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2438 
2439 	return hns_roce_cmq_send(hr_dev, &desc, 1);
2440 }
2441 
2442 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2443 			       int gid_index, const union ib_gid *gid,
2444 			       const struct ib_gid_attr *attr)
2445 {
2446 	enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2447 	int ret;
2448 
2449 	if (!gid || !attr)
2450 		return -EINVAL;
2451 
2452 	if (attr->gid_type == IB_GID_TYPE_ROCE)
2453 		sgid_type = GID_TYPE_FLAG_ROCE_V1;
2454 
2455 	if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2456 		if (ipv6_addr_v4mapped((void *)gid))
2457 			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2458 		else
2459 			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2460 	}
2461 
2462 	ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2463 	if (ret)
2464 		dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
2465 
2466 	return ret;
2467 }
2468 
2469 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2470 			       u8 *addr)
2471 {
2472 	struct hns_roce_cmq_desc desc;
2473 	struct hns_roce_cfg_smac_tb *smac_tb =
2474 				    (struct hns_roce_cfg_smac_tb *)desc.data;
2475 	u16 reg_smac_h;
2476 	u32 reg_smac_l;
2477 
2478 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2479 
2480 	reg_smac_l = *(u32 *)(&addr[0]);
2481 	reg_smac_h = *(u16 *)(&addr[4]);
2482 
2483 	memset(smac_tb, 0, sizeof(*smac_tb));
2484 	roce_set_field(smac_tb->tb_idx_rsv,
2485 		       CFG_SMAC_TB_IDX_M,
2486 		       CFG_SMAC_TB_IDX_S, phy_port);
2487 	roce_set_field(smac_tb->vf_smac_h_rsv,
2488 		       CFG_SMAC_TB_VF_SMAC_H_M,
2489 		       CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2490 	smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2491 
2492 	return hns_roce_cmq_send(hr_dev, &desc, 1);
2493 }
2494 
2495 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2496 			struct hns_roce_mr *mr)
2497 {
2498 	struct sg_dma_page_iter sg_iter;
2499 	u64 page_addr;
2500 	u64 *pages;
2501 	int i;
2502 
2503 	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2504 	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2505 	roce_set_field(mpt_entry->byte_48_mode_ba,
2506 		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2507 		       upper_32_bits(mr->pbl_ba >> 3));
2508 
2509 	pages = (u64 *)__get_free_page(GFP_KERNEL);
2510 	if (!pages)
2511 		return -ENOMEM;
2512 
2513 	i = 0;
2514 	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2515 		page_addr = sg_page_iter_dma_address(&sg_iter);
2516 		pages[i] = page_addr >> 6;
2517 
2518 		/* Record the first 2 entry directly to MTPT table */
2519 		if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2520 			goto found;
2521 		i++;
2522 	}
2523 found:
2524 	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2525 	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2526 		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2527 
2528 	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2529 	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2530 		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2531 	roce_set_field(mpt_entry->byte_64_buf_pa1,
2532 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2533 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2534 		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2535 
2536 	free_page((unsigned long)pages);
2537 
2538 	return 0;
2539 }
2540 
2541 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
2542 				  unsigned long mtpt_idx)
2543 {
2544 	struct hns_roce_v2_mpt_entry *mpt_entry;
2545 	int ret;
2546 
2547 	mpt_entry = mb_buf;
2548 	memset(mpt_entry, 0, sizeof(*mpt_entry));
2549 
2550 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2551 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2552 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2553 		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2554 		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2555 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
2556 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2557 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2558 		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2559 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2560 		       V2_MPT_BYTE_4_PD_S, mr->pd);
2561 
2562 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2563 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2564 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2565 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2566 		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2567 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2568 		     mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2569 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2570 		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2571 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2572 		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2573 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2574 		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2575 
2576 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2577 		     mr->type == MR_TYPE_MR ? 0 : 1);
2578 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2579 		     1);
2580 
2581 	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2582 	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2583 	mpt_entry->lkey = cpu_to_le32(mr->key);
2584 	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2585 	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2586 
2587 	if (mr->type == MR_TYPE_DMA)
2588 		return 0;
2589 
2590 	ret = set_mtpt_pbl(mpt_entry, mr);
2591 
2592 	return ret;
2593 }
2594 
2595 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2596 					struct hns_roce_mr *mr, int flags,
2597 					u32 pdn, int mr_access_flags, u64 iova,
2598 					u64 size, void *mb_buf)
2599 {
2600 	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2601 	int ret = 0;
2602 
2603 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2604 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2605 
2606 	if (flags & IB_MR_REREG_PD) {
2607 		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2608 			       V2_MPT_BYTE_4_PD_S, pdn);
2609 		mr->pd = pdn;
2610 	}
2611 
2612 	if (flags & IB_MR_REREG_ACCESS) {
2613 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2614 			     V2_MPT_BYTE_8_BIND_EN_S,
2615 			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2616 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2617 			     V2_MPT_BYTE_8_ATOMIC_EN_S,
2618 			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2619 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2620 			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2621 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2622 			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2623 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2624 			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2625 	}
2626 
2627 	if (flags & IB_MR_REREG_TRANS) {
2628 		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2629 		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2630 		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2631 		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2632 
2633 		mr->iova = iova;
2634 		mr->size = size;
2635 
2636 		ret = set_mtpt_pbl(mpt_entry, mr);
2637 	}
2638 
2639 	return ret;
2640 }
2641 
2642 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
2643 {
2644 	struct hns_roce_v2_mpt_entry *mpt_entry;
2645 
2646 	mpt_entry = mb_buf;
2647 	memset(mpt_entry, 0, sizeof(*mpt_entry));
2648 
2649 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2650 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2651 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2652 		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2653 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
2654 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2655 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2656 		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2657 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2658 		       V2_MPT_BYTE_4_PD_S, mr->pd);
2659 
2660 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2661 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2662 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2663 
2664 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2665 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2666 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2667 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2668 
2669 	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2670 
2671 	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2672 	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2673 		       V2_MPT_BYTE_48_PBL_BA_H_S,
2674 		       upper_32_bits(mr->pbl_ba >> 3));
2675 
2676 	roce_set_field(mpt_entry->byte_64_buf_pa1,
2677 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2678 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2679 		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2680 
2681 	return 0;
2682 }
2683 
2684 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2685 {
2686 	struct hns_roce_v2_mpt_entry *mpt_entry;
2687 
2688 	mpt_entry = mb_buf;
2689 	memset(mpt_entry, 0, sizeof(*mpt_entry));
2690 
2691 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2692 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2693 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2694 		       V2_MPT_BYTE_4_PD_S, mw->pdn);
2695 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2696 		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2697 		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
2698 							       mw->pbl_hop_num);
2699 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
2700 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2701 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2702 		       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2703 
2704 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2705 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2706 
2707 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2708 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2709 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2710 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2711 		     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2712 
2713 	roce_set_field(mpt_entry->byte_64_buf_pa1,
2714 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2715 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2716 		       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2717 
2718 	mpt_entry->lkey = cpu_to_le32(mw->rkey);
2719 
2720 	return 0;
2721 }
2722 
2723 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2724 {
2725 	return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2726 }
2727 
2728 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2729 {
2730 	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2731 
2732 	/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
2733 	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2734 		!!(n & hr_cq->cq_depth)) ? cqe : NULL;
2735 }
2736 
2737 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2738 {
2739 	return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2740 }
2741 
2742 static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2743 {
2744 	return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2745 }
2746 
2747 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2748 {
2749 	/* always called with interrupts disabled. */
2750 	spin_lock(&srq->lock);
2751 
2752 	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
2753 	srq->tail++;
2754 
2755 	spin_unlock(&srq->lock);
2756 }
2757 
2758 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2759 {
2760 	*hr_cq->set_ci_db = cons_index & 0xffffff;
2761 }
2762 
2763 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2764 				   struct hns_roce_srq *srq)
2765 {
2766 	struct hns_roce_v2_cqe *cqe, *dest;
2767 	u32 prod_index;
2768 	int nfreed = 0;
2769 	int wqe_index;
2770 	u8 owner_bit;
2771 
2772 	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2773 	     ++prod_index) {
2774 		if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
2775 			break;
2776 	}
2777 
2778 	/*
2779 	 * Now backwards through the CQ, removing CQ entries
2780 	 * that match our QP by overwriting them with next entries.
2781 	 */
2782 	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2783 		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2784 		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2785 				    V2_CQE_BYTE_16_LCL_QPN_S) &
2786 				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2787 			if (srq &&
2788 			    roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
2789 				wqe_index = roce_get_field(cqe->byte_4,
2790 						     V2_CQE_BYTE_4_WQE_INDX_M,
2791 						     V2_CQE_BYTE_4_WQE_INDX_S);
2792 				hns_roce_free_srq_wqe(srq, wqe_index);
2793 			}
2794 			++nfreed;
2795 		} else if (nfreed) {
2796 			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2797 					  hr_cq->ib_cq.cqe);
2798 			owner_bit = roce_get_bit(dest->byte_4,
2799 						 V2_CQE_BYTE_4_OWNER_S);
2800 			memcpy(dest, cqe, sizeof(*cqe));
2801 			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2802 				     owner_bit);
2803 		}
2804 	}
2805 
2806 	if (nfreed) {
2807 		hr_cq->cons_index += nfreed;
2808 		/*
2809 		 * Make sure update of buffer contents is done before
2810 		 * updating consumer index.
2811 		 */
2812 		wmb();
2813 		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2814 	}
2815 }
2816 
2817 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2818 				 struct hns_roce_srq *srq)
2819 {
2820 	spin_lock_irq(&hr_cq->lock);
2821 	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2822 	spin_unlock_irq(&hr_cq->lock);
2823 }
2824 
2825 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2826 				  struct hns_roce_cq *hr_cq, void *mb_buf,
2827 				  u64 *mtts, dma_addr_t dma_handle)
2828 {
2829 	struct hns_roce_v2_cq_context *cq_context;
2830 
2831 	cq_context = mb_buf;
2832 	memset(cq_context, 0, sizeof(*cq_context));
2833 
2834 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2835 		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2836 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2837 		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2838 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2839 		       V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
2840 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2841 		       V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
2842 
2843 	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2844 		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
2845 
2846 	cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
2847 
2848 	roce_set_field(cq_context->byte_16_hop_addr,
2849 		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2850 		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2851 		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
2852 	roce_set_field(cq_context->byte_16_hop_addr,
2853 		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2854 		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2855 		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2856 
2857 	cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
2858 	roce_set_field(cq_context->byte_24_pgsz_addr,
2859 		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2860 		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2861 		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
2862 	roce_set_field(cq_context->byte_24_pgsz_addr,
2863 		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2864 		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2865 		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2866 	roce_set_field(cq_context->byte_24_pgsz_addr,
2867 		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2868 		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2869 		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2870 
2871 	cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
2872 
2873 	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2874 		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2875 
2876 	if (hr_cq->db_en)
2877 		roce_set_bit(cq_context->byte_44_db_record,
2878 			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2879 
2880 	roce_set_field(cq_context->byte_44_db_record,
2881 		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2882 		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2883 		       ((u32)hr_cq->db.dma) >> 1);
2884 	cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
2885 
2886 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2887 		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2888 		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2889 		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2890 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2891 		       V2_CQC_BYTE_56_CQ_PERIOD_M,
2892 		       V2_CQC_BYTE_56_CQ_PERIOD_S,
2893 		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2894 }
2895 
2896 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2897 				     enum ib_cq_notify_flags flags)
2898 {
2899 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2900 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2901 	u32 notification_flag;
2902 	__le32 doorbell[2];
2903 
2904 	doorbell[0] = 0;
2905 	doorbell[1] = 0;
2906 
2907 	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2908 			     V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2909 	/*
2910 	 * flags = 0; Notification Flag = 1, next
2911 	 * flags = 1; Notification Flag = 0, solocited
2912 	 */
2913 	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2914 		       hr_cq->cqn);
2915 	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2916 		       HNS_ROCE_V2_CQ_DB_NTR);
2917 	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2918 		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
2919 		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2920 	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2921 		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2922 	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2923 		     notification_flag);
2924 
2925 	hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
2926 
2927 	return 0;
2928 }
2929 
2930 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2931 						    struct hns_roce_qp **cur_qp,
2932 						    struct ib_wc *wc)
2933 {
2934 	struct hns_roce_rinl_sge *sge_list;
2935 	u32 wr_num, wr_cnt, sge_num;
2936 	u32 sge_cnt, data_len, size;
2937 	void *wqe_buf;
2938 
2939 	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2940 				V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2941 	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2942 
2943 	sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2944 	sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2945 	wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2946 	data_len = wc->byte_len;
2947 
2948 	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2949 		size = min(sge_list[sge_cnt].len, data_len);
2950 		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2951 
2952 		data_len -= size;
2953 		wqe_buf += size;
2954 	}
2955 
2956 	if (data_len) {
2957 		wc->status = IB_WC_LOC_LEN_ERR;
2958 		return -EAGAIN;
2959 	}
2960 
2961 	return 0;
2962 }
2963 
2964 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
2965 		   int num_entries, struct ib_wc *wc)
2966 {
2967 	unsigned int left;
2968 	int npolled = 0;
2969 
2970 	left = wq->head - wq->tail;
2971 	if (left == 0)
2972 		return 0;
2973 
2974 	left = min_t(unsigned int, (unsigned int)num_entries, left);
2975 	while (npolled < left) {
2976 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2977 		wc->status = IB_WC_WR_FLUSH_ERR;
2978 		wc->vendor_err = 0;
2979 		wc->qp = &hr_qp->ibqp;
2980 
2981 		wq->tail++;
2982 		wc++;
2983 		npolled++;
2984 	}
2985 
2986 	return npolled;
2987 }
2988 
2989 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
2990 				  struct ib_wc *wc)
2991 {
2992 	struct hns_roce_qp *hr_qp;
2993 	int npolled = 0;
2994 
2995 	list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
2996 		npolled += sw_comp(hr_qp, &hr_qp->sq,
2997 				   num_entries - npolled, wc + npolled);
2998 		if (npolled >= num_entries)
2999 			goto out;
3000 	}
3001 
3002 	list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3003 		npolled += sw_comp(hr_qp, &hr_qp->rq,
3004 				   num_entries - npolled, wc + npolled);
3005 		if (npolled >= num_entries)
3006 			goto out;
3007 	}
3008 
3009 out:
3010 	return npolled;
3011 }
3012 
3013 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3014 				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3015 {
3016 	struct hns_roce_srq *srq = NULL;
3017 	struct hns_roce_dev *hr_dev;
3018 	struct hns_roce_v2_cqe *cqe;
3019 	struct hns_roce_qp *hr_qp;
3020 	struct hns_roce_wq *wq;
3021 	struct ib_qp_attr attr;
3022 	int attr_mask;
3023 	int is_send;
3024 	u16 wqe_ctr;
3025 	u32 opcode;
3026 	u32 status;
3027 	int qpn;
3028 	int ret;
3029 
3030 	/* Find cqe according to consumer index */
3031 	cqe = next_cqe_sw_v2(hr_cq);
3032 	if (!cqe)
3033 		return -EAGAIN;
3034 
3035 	++hr_cq->cons_index;
3036 	/* Memory barrier */
3037 	rmb();
3038 
3039 	/* 0->SQ, 1->RQ */
3040 	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
3041 
3042 	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
3043 				V2_CQE_BYTE_16_LCL_QPN_S);
3044 
3045 	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
3046 		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3047 		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3048 		if (unlikely(!hr_qp)) {
3049 			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
3050 				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
3051 			return -EINVAL;
3052 		}
3053 		*cur_qp = hr_qp;
3054 	}
3055 
3056 	wc->qp = &(*cur_qp)->ibqp;
3057 	wc->vendor_err = 0;
3058 
3059 	if (is_send) {
3060 		wq = &(*cur_qp)->sq;
3061 		if ((*cur_qp)->sq_signal_bits) {
3062 			/*
3063 			 * If sg_signal_bit is 1,
3064 			 * firstly tail pointer updated to wqe
3065 			 * which current cqe correspond to
3066 			 */
3067 			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3068 						      V2_CQE_BYTE_4_WQE_INDX_M,
3069 						      V2_CQE_BYTE_4_WQE_INDX_S);
3070 			wq->tail += (wqe_ctr - (u16)wq->tail) &
3071 				    (wq->wqe_cnt - 1);
3072 		}
3073 
3074 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3075 		++wq->tail;
3076 	} else if ((*cur_qp)->ibqp.srq) {
3077 		srq = to_hr_srq((*cur_qp)->ibqp.srq);
3078 		wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3079 					      V2_CQE_BYTE_4_WQE_INDX_M,
3080 					      V2_CQE_BYTE_4_WQE_INDX_S);
3081 		wc->wr_id = srq->wrid[wqe_ctr];
3082 		hns_roce_free_srq_wqe(srq, wqe_ctr);
3083 	} else {
3084 		/* Update tail pointer, record wr_id */
3085 		wq = &(*cur_qp)->rq;
3086 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3087 		++wq->tail;
3088 	}
3089 
3090 	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
3091 				V2_CQE_BYTE_4_STATUS_S);
3092 	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
3093 	case HNS_ROCE_CQE_V2_SUCCESS:
3094 		wc->status = IB_WC_SUCCESS;
3095 		break;
3096 	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
3097 		wc->status = IB_WC_LOC_LEN_ERR;
3098 		break;
3099 	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
3100 		wc->status = IB_WC_LOC_QP_OP_ERR;
3101 		break;
3102 	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
3103 		wc->status = IB_WC_LOC_PROT_ERR;
3104 		break;
3105 	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
3106 		wc->status = IB_WC_WR_FLUSH_ERR;
3107 		break;
3108 	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
3109 		wc->status = IB_WC_MW_BIND_ERR;
3110 		break;
3111 	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
3112 		wc->status = IB_WC_BAD_RESP_ERR;
3113 		break;
3114 	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
3115 		wc->status = IB_WC_LOC_ACCESS_ERR;
3116 		break;
3117 	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
3118 		wc->status = IB_WC_REM_INV_REQ_ERR;
3119 		break;
3120 	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
3121 		wc->status = IB_WC_REM_ACCESS_ERR;
3122 		break;
3123 	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
3124 		wc->status = IB_WC_REM_OP_ERR;
3125 		break;
3126 	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
3127 		wc->status = IB_WC_RETRY_EXC_ERR;
3128 		break;
3129 	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
3130 		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
3131 		break;
3132 	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
3133 		wc->status = IB_WC_REM_ABORT_ERR;
3134 		break;
3135 	default:
3136 		wc->status = IB_WC_GENERAL_ERR;
3137 		break;
3138 	}
3139 
3140 	/* flush cqe if wc status is error, excluding flush error */
3141 	if ((wc->status != IB_WC_SUCCESS) &&
3142 	    (wc->status != IB_WC_WR_FLUSH_ERR)) {
3143 		attr_mask = IB_QP_STATE;
3144 		attr.qp_state = IB_QPS_ERR;
3145 		return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
3146 					     &attr, attr_mask,
3147 					     (*cur_qp)->state, IB_QPS_ERR);
3148 	}
3149 
3150 	if (wc->status == IB_WC_WR_FLUSH_ERR)
3151 		return 0;
3152 
3153 	if (is_send) {
3154 		wc->wc_flags = 0;
3155 		/* SQ corresponding to CQE */
3156 		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3157 				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
3158 		case HNS_ROCE_SQ_OPCODE_SEND:
3159 			wc->opcode = IB_WC_SEND;
3160 			break;
3161 		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
3162 			wc->opcode = IB_WC_SEND;
3163 			break;
3164 		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
3165 			wc->opcode = IB_WC_SEND;
3166 			wc->wc_flags |= IB_WC_WITH_IMM;
3167 			break;
3168 		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
3169 			wc->opcode = IB_WC_RDMA_READ;
3170 			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3171 			break;
3172 		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
3173 			wc->opcode = IB_WC_RDMA_WRITE;
3174 			break;
3175 		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
3176 			wc->opcode = IB_WC_RDMA_WRITE;
3177 			wc->wc_flags |= IB_WC_WITH_IMM;
3178 			break;
3179 		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
3180 			wc->opcode = IB_WC_LOCAL_INV;
3181 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3182 			break;
3183 		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
3184 			wc->opcode = IB_WC_COMP_SWAP;
3185 			wc->byte_len  = 8;
3186 			break;
3187 		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
3188 			wc->opcode = IB_WC_FETCH_ADD;
3189 			wc->byte_len  = 8;
3190 			break;
3191 		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
3192 			wc->opcode = IB_WC_MASKED_COMP_SWAP;
3193 			wc->byte_len  = 8;
3194 			break;
3195 		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
3196 			wc->opcode = IB_WC_MASKED_FETCH_ADD;
3197 			wc->byte_len  = 8;
3198 			break;
3199 		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
3200 			wc->opcode = IB_WC_REG_MR;
3201 			break;
3202 		case HNS_ROCE_SQ_OPCODE_BIND_MW:
3203 			wc->opcode = IB_WC_REG_MR;
3204 			break;
3205 		default:
3206 			wc->status = IB_WC_GENERAL_ERR;
3207 			break;
3208 		}
3209 	} else {
3210 		/* RQ correspond to CQE */
3211 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3212 
3213 		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3214 					V2_CQE_BYTE_4_OPCODE_S);
3215 		switch (opcode & 0x1f) {
3216 		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3217 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3218 			wc->wc_flags = IB_WC_WITH_IMM;
3219 			wc->ex.imm_data =
3220 				cpu_to_be32(le32_to_cpu(cqe->immtdata));
3221 			break;
3222 		case HNS_ROCE_V2_OPCODE_SEND:
3223 			wc->opcode = IB_WC_RECV;
3224 			wc->wc_flags = 0;
3225 			break;
3226 		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3227 			wc->opcode = IB_WC_RECV;
3228 			wc->wc_flags = IB_WC_WITH_IMM;
3229 			wc->ex.imm_data =
3230 				cpu_to_be32(le32_to_cpu(cqe->immtdata));
3231 			break;
3232 		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3233 			wc->opcode = IB_WC_RECV;
3234 			wc->wc_flags = IB_WC_WITH_INVALIDATE;
3235 			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3236 			break;
3237 		default:
3238 			wc->status = IB_WC_GENERAL_ERR;
3239 			break;
3240 		}
3241 
3242 		if ((wc->qp->qp_type == IB_QPT_RC ||
3243 		     wc->qp->qp_type == IB_QPT_UC) &&
3244 		    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
3245 		    opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3246 		    opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3247 		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
3248 			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
3249 			if (ret)
3250 				return -EAGAIN;
3251 		}
3252 
3253 		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
3254 					    V2_CQE_BYTE_32_SL_S);
3255 		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
3256 						V2_CQE_BYTE_32_RMT_QPN_M,
3257 						V2_CQE_BYTE_32_RMT_QPN_S);
3258 		wc->slid = 0;
3259 		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
3260 					      V2_CQE_BYTE_32_GRH_S) ?
3261 					      IB_WC_GRH : 0);
3262 		wc->port_num = roce_get_field(cqe->byte_32,
3263 				V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
3264 		wc->pkey_index = 0;
3265 		memcpy(wc->smac, cqe->smac, 4);
3266 		wc->smac[4] = roce_get_field(cqe->byte_28,
3267 					     V2_CQE_BYTE_28_SMAC_4_M,
3268 					     V2_CQE_BYTE_28_SMAC_4_S);
3269 		wc->smac[5] = roce_get_field(cqe->byte_28,
3270 					     V2_CQE_BYTE_28_SMAC_5_M,
3271 					     V2_CQE_BYTE_28_SMAC_5_S);
3272 		wc->wc_flags |= IB_WC_WITH_SMAC;
3273 		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
3274 			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
3275 							  V2_CQE_BYTE_28_VID_M,
3276 							  V2_CQE_BYTE_28_VID_S);
3277 			wc->wc_flags |= IB_WC_WITH_VLAN;
3278 		} else {
3279 			wc->vlan_id = 0xffff;
3280 		}
3281 
3282 		wc->network_hdr_type = roce_get_field(cqe->byte_28,
3283 						    V2_CQE_BYTE_28_PORT_TYPE_M,
3284 						    V2_CQE_BYTE_28_PORT_TYPE_S);
3285 	}
3286 
3287 	return 0;
3288 }
3289 
3290 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3291 			       struct ib_wc *wc)
3292 {
3293 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3294 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3295 	struct hns_roce_qp *cur_qp = NULL;
3296 	unsigned long flags;
3297 	int npolled;
3298 
3299 	spin_lock_irqsave(&hr_cq->lock, flags);
3300 
3301 	/*
3302 	 * When the device starts to reset, the state is RST_DOWN. At this time,
3303 	 * there may still be some valid CQEs in the hardware that are not
3304 	 * polled. Therefore, it is not allowed to switch to the software mode
3305 	 * immediately. When the state changes to UNINIT, CQE no longer exists
3306 	 * in the hardware, and then switch to software mode.
3307 	 */
3308 	if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3309 		npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3310 		goto out;
3311 	}
3312 
3313 	for (npolled = 0; npolled < num_entries; ++npolled) {
3314 		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3315 			break;
3316 	}
3317 
3318 	if (npolled) {
3319 		/* Memory barrier */
3320 		wmb();
3321 		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3322 	}
3323 
3324 out:
3325 	spin_unlock_irqrestore(&hr_cq->lock, flags);
3326 
3327 	return npolled;
3328 }
3329 
3330 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3331 			      int step_idx)
3332 {
3333 	int op;
3334 
3335 	if (type == HEM_TYPE_SCCC && step_idx)
3336 		return -EINVAL;
3337 
3338 	switch (type) {
3339 	case HEM_TYPE_QPC:
3340 		op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3341 		break;
3342 	case HEM_TYPE_MTPT:
3343 		op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3344 		break;
3345 	case HEM_TYPE_CQC:
3346 		op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3347 		break;
3348 	case HEM_TYPE_SRQC:
3349 		op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3350 		break;
3351 	case HEM_TYPE_SCCC:
3352 		op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3353 		break;
3354 	case HEM_TYPE_QPC_TIMER:
3355 		op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3356 		break;
3357 	case HEM_TYPE_CQC_TIMER:
3358 		op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3359 		break;
3360 	default:
3361 		dev_warn(hr_dev->dev,
3362 			 "Table %d not to be written by mailbox!\n", type);
3363 		return -EINVAL;
3364 	}
3365 
3366 	return op + step_idx;
3367 }
3368 
3369 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3370 			       struct hns_roce_hem_table *table, int obj,
3371 			       int step_idx)
3372 {
3373 	struct hns_roce_cmd_mailbox *mailbox;
3374 	struct hns_roce_hem_iter iter;
3375 	struct hns_roce_hem_mhop mhop;
3376 	struct hns_roce_hem *hem;
3377 	unsigned long mhop_obj = obj;
3378 	int i, j, k;
3379 	int ret = 0;
3380 	u64 hem_idx = 0;
3381 	u64 l1_idx = 0;
3382 	u64 bt_ba = 0;
3383 	u32 chunk_ba_num;
3384 	u32 hop_num;
3385 	int op;
3386 
3387 	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3388 		return 0;
3389 
3390 	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3391 	i = mhop.l0_idx;
3392 	j = mhop.l1_idx;
3393 	k = mhop.l2_idx;
3394 	hop_num = mhop.hop_num;
3395 	chunk_ba_num = mhop.bt_chunk_size / 8;
3396 
3397 	if (hop_num == 2) {
3398 		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3399 			  k;
3400 		l1_idx = i * chunk_ba_num + j;
3401 	} else if (hop_num == 1) {
3402 		hem_idx = i * chunk_ba_num + j;
3403 	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3404 		hem_idx = i;
3405 	}
3406 
3407 	op = get_op_for_set_hem(hr_dev, table->type, step_idx);
3408 	if (op == -EINVAL)
3409 		return 0;
3410 
3411 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3412 	if (IS_ERR(mailbox))
3413 		return PTR_ERR(mailbox);
3414 
3415 	if (table->type == HEM_TYPE_SCCC)
3416 		obj = mhop.l0_idx;
3417 
3418 	if (check_whether_last_step(hop_num, step_idx)) {
3419 		hem = table->hem[hem_idx];
3420 		for (hns_roce_hem_first(hem, &iter);
3421 		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3422 			bt_ba = hns_roce_hem_addr(&iter);
3423 
3424 			/* configure the ba, tag, and op */
3425 			ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
3426 						obj, 0, op,
3427 						HNS_ROCE_CMD_TIMEOUT_MSECS);
3428 		}
3429 	} else {
3430 		if (step_idx == 0)
3431 			bt_ba = table->bt_l0_dma_addr[i];
3432 		else if (step_idx == 1 && hop_num == 2)
3433 			bt_ba = table->bt_l1_dma_addr[l1_idx];
3434 
3435 		/* configure the ba, tag, and op */
3436 		ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3437 					0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3438 	}
3439 
3440 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3441 	return ret;
3442 }
3443 
3444 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3445 				 struct hns_roce_hem_table *table, int obj,
3446 				 int step_idx)
3447 {
3448 	struct device *dev = hr_dev->dev;
3449 	struct hns_roce_cmd_mailbox *mailbox;
3450 	int ret;
3451 	u16 op = 0xff;
3452 
3453 	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3454 		return 0;
3455 
3456 	switch (table->type) {
3457 	case HEM_TYPE_QPC:
3458 		op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3459 		break;
3460 	case HEM_TYPE_MTPT:
3461 		op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3462 		break;
3463 	case HEM_TYPE_CQC:
3464 		op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3465 		break;
3466 	case HEM_TYPE_SCCC:
3467 	case HEM_TYPE_QPC_TIMER:
3468 	case HEM_TYPE_CQC_TIMER:
3469 		break;
3470 	case HEM_TYPE_SRQC:
3471 		op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3472 		break;
3473 	default:
3474 		dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
3475 			 table->type);
3476 		return 0;
3477 	}
3478 
3479 	if (table->type == HEM_TYPE_SCCC ||
3480 	    table->type == HEM_TYPE_QPC_TIMER ||
3481 	    table->type == HEM_TYPE_CQC_TIMER)
3482 		return 0;
3483 
3484 	op += step_idx;
3485 
3486 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3487 	if (IS_ERR(mailbox))
3488 		return PTR_ERR(mailbox);
3489 
3490 	/* configure the tag and op */
3491 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3492 				HNS_ROCE_CMD_TIMEOUT_MSECS);
3493 
3494 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3495 	return ret;
3496 }
3497 
3498 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3499 				 struct hns_roce_v2_qp_context *context,
3500 				 struct hns_roce_qp *hr_qp)
3501 {
3502 	struct hns_roce_cmd_mailbox *mailbox;
3503 	int ret;
3504 
3505 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3506 	if (IS_ERR(mailbox))
3507 		return PTR_ERR(mailbox);
3508 
3509 	memcpy(mailbox->buf, context, sizeof(*context) * 2);
3510 
3511 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3512 				HNS_ROCE_CMD_MODIFY_QPC,
3513 				HNS_ROCE_CMD_TIMEOUT_MSECS);
3514 
3515 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3516 
3517 	return ret;
3518 }
3519 
3520 static void set_access_flags(struct hns_roce_qp *hr_qp,
3521 			     struct hns_roce_v2_qp_context *context,
3522 			     struct hns_roce_v2_qp_context *qpc_mask,
3523 			     const struct ib_qp_attr *attr, int attr_mask)
3524 {
3525 	u8 dest_rd_atomic;
3526 	u32 access_flags;
3527 
3528 	dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3529 			 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3530 
3531 	access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3532 		       attr->qp_access_flags : hr_qp->atomic_rd_en;
3533 
3534 	if (!dest_rd_atomic)
3535 		access_flags &= IB_ACCESS_REMOTE_WRITE;
3536 
3537 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3538 		     !!(access_flags & IB_ACCESS_REMOTE_READ));
3539 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3540 
3541 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3542 		     !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3543 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3544 
3545 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3546 		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3547 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3548 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
3549 		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3550 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
3551 }
3552 
3553 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3554 			    struct hns_roce_v2_qp_context *context,
3555 			    struct hns_roce_v2_qp_context *qpc_mask)
3556 {
3557 	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3558 		roce_set_field(context->byte_4_sqpn_tst,
3559 			       V2_QPC_BYTE_4_SGE_SHIFT_M,
3560 			       V2_QPC_BYTE_4_SGE_SHIFT_S,
3561 			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
3562 	else
3563 		roce_set_field(context->byte_4_sqpn_tst,
3564 			       V2_QPC_BYTE_4_SGE_SHIFT_M,
3565 			       V2_QPC_BYTE_4_SGE_SHIFT_S,
3566 			       hr_qp->sq.max_gs >
3567 			       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
3568 			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3569 
3570 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3571 		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3572 
3573 	roce_set_field(context->byte_20_smac_sgid_idx,
3574 		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3575 		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3576 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3577 		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3578 
3579 	roce_set_field(context->byte_20_smac_sgid_idx,
3580 		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3581 		       (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3582 		       hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
3583 		       hr_qp->ibqp.srq) ? 0 :
3584 		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3585 
3586 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3587 		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3588 }
3589 
3590 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3591 				    const struct ib_qp_attr *attr,
3592 				    int attr_mask,
3593 				    struct hns_roce_v2_qp_context *context,
3594 				    struct hns_roce_v2_qp_context *qpc_mask)
3595 {
3596 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3597 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3598 
3599 	/*
3600 	 * In v2 engine, software pass context and context mask to hardware
3601 	 * when modifying qp. If software need modify some fields in context,
3602 	 * we should set all bits of the relevant fields in context mask to
3603 	 * 0 at the same time, else set them to 0x1.
3604 	 */
3605 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3606 		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3607 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3608 		       V2_QPC_BYTE_4_TST_S, 0);
3609 
3610 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3611 		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3612 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3613 		       V2_QPC_BYTE_4_SQPN_S, 0);
3614 
3615 	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3616 		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3617 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3618 		       V2_QPC_BYTE_16_PD_S, 0);
3619 
3620 	roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3621 		       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3622 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3623 		       V2_QPC_BYTE_20_RQWS_S, 0);
3624 
3625 	set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3626 
3627 	/* No VLAN need to set 0xFFF */
3628 	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3629 		       V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3630 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3631 		       V2_QPC_BYTE_24_VLAN_ID_S, 0);
3632 
3633 	/*
3634 	 * Set some fields in context to zero, Because the default values
3635 	 * of all fields in context are zero, we need not set them to 0 again.
3636 	 * but we should set the relevant fields of context mask to 0.
3637 	 */
3638 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3639 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3640 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3641 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3642 
3643 	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3644 		       V2_QPC_BYTE_60_TEMPID_S, 0);
3645 
3646 	roce_set_field(qpc_mask->byte_60_qpst_tempid,
3647 		       V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3648 		       0);
3649 	roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3650 		     V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3651 	roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3652 		     V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
3653 	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3654 	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3655 
3656 	if (hr_qp->rdb_en) {
3657 		roce_set_bit(context->byte_68_rq_db,
3658 			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3659 		roce_set_bit(qpc_mask->byte_68_rq_db,
3660 			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3661 	}
3662 
3663 	roce_set_field(context->byte_68_rq_db,
3664 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3665 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3666 		       ((u32)hr_qp->rdb.dma) >> 1);
3667 	roce_set_field(qpc_mask->byte_68_rq_db,
3668 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3669 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
3670 	context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
3671 	qpc_mask->rq_db_record_addr = 0;
3672 
3673 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3674 		    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3675 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3676 
3677 	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3678 		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3679 	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3680 		       V2_QPC_BYTE_80_RX_CQN_S, 0);
3681 	if (ibqp->srq) {
3682 		roce_set_field(context->byte_76_srqn_op_en,
3683 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3684 			       to_hr_srq(ibqp->srq)->srqn);
3685 		roce_set_field(qpc_mask->byte_76_srqn_op_en,
3686 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3687 		roce_set_bit(context->byte_76_srqn_op_en,
3688 			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
3689 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3690 			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
3691 	}
3692 
3693 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3694 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3695 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3696 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3697 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3698 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3699 
3700 	roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3701 		       V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3702 
3703 	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3704 		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3705 
3706 	roce_set_field(qpc_mask->byte_104_rq_sge,
3707 		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3708 		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3709 
3710 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3711 		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3712 	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3713 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3714 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3715 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3716 		     V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3717 
3718 	qpc_mask->rq_rnr_timer = 0;
3719 	qpc_mask->rx_msg_len = 0;
3720 	qpc_mask->rx_rkey_pkt_info = 0;
3721 	qpc_mask->rx_va = 0;
3722 
3723 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3724 		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3725 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3726 		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3727 
3728 	roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3729 		     0);
3730 	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3731 		       V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3732 	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3733 		       V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3734 
3735 	roce_set_field(qpc_mask->byte_144_raq,
3736 		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3737 		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
3738 	roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3739 		       V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3740 	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3741 
3742 	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3743 		       V2_QPC_BYTE_148_RQ_MSN_S, 0);
3744 	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3745 		       V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3746 
3747 	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3748 		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3749 	roce_set_field(qpc_mask->byte_152_raq,
3750 		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3751 		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3752 
3753 	roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3754 		       V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3755 
3756 	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3757 		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3758 		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3759 	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3760 		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3761 		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3762 
3763 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3764 		     V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3765 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3766 		     V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3767 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3768 		     V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
3769 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3770 		     V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
3771 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3772 		     V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
3773 	roce_set_field(qpc_mask->byte_168_irrl_idx,
3774 		       V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3775 		       V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3776 
3777 	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3778 		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3779 	roce_set_field(qpc_mask->byte_172_sq_psn,
3780 		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3781 		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3782 
3783 	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3784 		     0);
3785 
3786 	roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3787 	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3788 
3789 	roce_set_field(qpc_mask->byte_176_msg_pktn,
3790 		       V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3791 		       V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3792 	roce_set_field(qpc_mask->byte_176_msg_pktn,
3793 		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3794 		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3795 
3796 	roce_set_field(qpc_mask->byte_184_irrl_idx,
3797 		       V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3798 		       V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3799 
3800 	qpc_mask->cur_sge_offset = 0;
3801 
3802 	roce_set_field(qpc_mask->byte_192_ext_sge,
3803 		       V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3804 		       V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3805 	roce_set_field(qpc_mask->byte_192_ext_sge,
3806 		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3807 		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3808 
3809 	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3810 		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3811 
3812 	roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3813 		       V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3814 	roce_set_field(qpc_mask->byte_200_sq_max,
3815 		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3816 		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3817 
3818 	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3819 	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3820 
3821 	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3822 		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3823 
3824 	qpc_mask->sq_timer = 0;
3825 
3826 	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3827 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3828 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3829 	roce_set_field(qpc_mask->byte_232_irrl_sge,
3830 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3831 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3832 
3833 	roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3834 		     0);
3835 	roce_set_bit(qpc_mask->byte_232_irrl_sge,
3836 		     V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3837 	roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3838 		     0);
3839 
3840 	qpc_mask->irrl_cur_sge_offset = 0;
3841 
3842 	roce_set_field(qpc_mask->byte_240_irrl_tail,
3843 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3844 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3845 	roce_set_field(qpc_mask->byte_240_irrl_tail,
3846 		       V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3847 		       V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3848 	roce_set_field(qpc_mask->byte_240_irrl_tail,
3849 		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
3850 		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3851 
3852 	roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3853 		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3854 	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3855 		     0);
3856 	roce_set_field(qpc_mask->byte_248_ack_psn,
3857 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3858 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3859 	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3860 		     0);
3861 	roce_set_bit(qpc_mask->byte_248_ack_psn,
3862 		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3863 	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3864 		     0);
3865 
3866 	hr_qp->access_flags = attr->qp_access_flags;
3867 	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3868 		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3869 	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3870 		       V2_QPC_BYTE_252_TX_CQN_S, 0);
3871 
3872 	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3873 		       V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3874 
3875 	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3876 		       V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3877 		       V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3878 	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3879 		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3880 		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3881 }
3882 
3883 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3884 				   const struct ib_qp_attr *attr, int attr_mask,
3885 				   struct hns_roce_v2_qp_context *context,
3886 				   struct hns_roce_v2_qp_context *qpc_mask)
3887 {
3888 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3889 
3890 	/*
3891 	 * In v2 engine, software pass context and context mask to hardware
3892 	 * when modifying qp. If software need modify some fields in context,
3893 	 * we should set all bits of the relevant fields in context mask to
3894 	 * 0 at the same time, else set them to 0x1.
3895 	 */
3896 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3897 		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3898 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3899 		       V2_QPC_BYTE_4_TST_S, 0);
3900 
3901 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
3902 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3903 			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3904 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3905 			     0);
3906 
3907 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3908 			     !!(attr->qp_access_flags &
3909 			     IB_ACCESS_REMOTE_WRITE));
3910 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3911 			     0);
3912 
3913 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3914 			     !!(attr->qp_access_flags &
3915 			     IB_ACCESS_REMOTE_ATOMIC));
3916 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3917 			     0);
3918 		roce_set_bit(context->byte_76_srqn_op_en,
3919 			     V2_QPC_BYTE_76_EXT_ATE_S,
3920 			     !!(attr->qp_access_flags &
3921 				IB_ACCESS_REMOTE_ATOMIC));
3922 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3923 			     V2_QPC_BYTE_76_EXT_ATE_S, 0);
3924 	} else {
3925 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3926 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3927 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3928 			     0);
3929 
3930 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3931 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3932 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3933 			     0);
3934 
3935 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3936 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3937 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3938 			     0);
3939 		roce_set_bit(context->byte_76_srqn_op_en,
3940 			     V2_QPC_BYTE_76_EXT_ATE_S,
3941 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3942 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3943 			     V2_QPC_BYTE_76_EXT_ATE_S, 0);
3944 	}
3945 
3946 	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3947 		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3948 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3949 		       V2_QPC_BYTE_16_PD_S, 0);
3950 
3951 	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3952 		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3953 	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3954 		       V2_QPC_BYTE_80_RX_CQN_S, 0);
3955 
3956 	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3957 		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3958 	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3959 		       V2_QPC_BYTE_252_TX_CQN_S, 0);
3960 
3961 	if (ibqp->srq) {
3962 		roce_set_bit(context->byte_76_srqn_op_en,
3963 			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
3964 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3965 			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
3966 		roce_set_field(context->byte_76_srqn_op_en,
3967 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3968 			       to_hr_srq(ibqp->srq)->srqn);
3969 		roce_set_field(qpc_mask->byte_76_srqn_op_en,
3970 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3971 	}
3972 
3973 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3974 		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3975 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3976 		       V2_QPC_BYTE_4_SQPN_S, 0);
3977 
3978 	if (attr_mask & IB_QP_DEST_QPN) {
3979 		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3980 			       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3981 		roce_set_field(qpc_mask->byte_56_dqpn_err,
3982 			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3983 	}
3984 }
3985 
3986 static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3987 				   struct hns_roce_qp *hr_qp, int mtt_cnt,
3988 				   u32 page_size)
3989 {
3990 	struct device *dev = hr_dev->dev;
3991 
3992 	if (hr_qp->rq.wqe_cnt < 1)
3993 		return true;
3994 
3995 	if (mtt_cnt < 1) {
3996 		dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
3997 			hr_qp->qpn);
3998 		return false;
3999 	}
4000 
4001 	if (mtt_cnt < MTT_MIN_COUNT &&
4002 		(hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
4003 		dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
4004 			hr_qp->qpn);
4005 		return false;
4006 	}
4007 
4008 	return true;
4009 }
4010 
4011 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4012 				 const struct ib_qp_attr *attr, int attr_mask,
4013 				 struct hns_roce_v2_qp_context *context,
4014 				 struct hns_roce_v2_qp_context *qpc_mask)
4015 {
4016 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4017 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4018 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4019 	struct device *dev = hr_dev->dev;
4020 	u64 mtts[MTT_MIN_COUNT] = { 0 };
4021 	dma_addr_t dma_handle_3;
4022 	dma_addr_t dma_handle_2;
4023 	u64 wqe_sge_ba;
4024 	u32 page_size;
4025 	u8 port_num;
4026 	u64 *mtts_3;
4027 	u64 *mtts_2;
4028 	int count;
4029 	u8 *dmac;
4030 	u8 *smac;
4031 	int port;
4032 
4033 	/* Search qp buf's mtts */
4034 	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
4035 	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4036 				  hr_qp->rq.offset / page_size, mtts,
4037 				  MTT_MIN_COUNT, &wqe_sge_ba);
4038 	if (!ibqp->srq)
4039 		if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
4040 			return -EINVAL;
4041 
4042 	/* Search IRRL's mtts */
4043 	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4044 				     hr_qp->qpn, &dma_handle_2);
4045 	if (!mtts_2) {
4046 		dev_err(dev, "qp irrl_table find failed\n");
4047 		return -EINVAL;
4048 	}
4049 
4050 	/* Search TRRL's mtts */
4051 	mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4052 				     hr_qp->qpn, &dma_handle_3);
4053 	if (!mtts_3) {
4054 		dev_err(dev, "qp trrl_table find failed\n");
4055 		return -EINVAL;
4056 	}
4057 
4058 	if (attr_mask & IB_QP_ALT_PATH) {
4059 		dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
4060 		return -EINVAL;
4061 	}
4062 
4063 	dmac = (u8 *)attr->ah_attr.roce.dmac;
4064 	context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4065 	qpc_mask->wqe_sge_ba = 0;
4066 
4067 	/*
4068 	 * In v2 engine, software pass context and context mask to hardware
4069 	 * when modifying qp. If software need modify some fields in context,
4070 	 * we should set all bits of the relevant fields in context mask to
4071 	 * 0 at the same time, else set them to 0x1.
4072 	 */
4073 	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
4074 		       V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
4075 	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
4076 		       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
4077 
4078 	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
4079 		       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
4080 		       hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
4081 		       0 : hr_dev->caps.wqe_sq_hop_num);
4082 	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
4083 		       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
4084 
4085 	roce_set_field(context->byte_20_smac_sgid_idx,
4086 		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4087 		       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
4088 		       ((ibqp->qp_type == IB_QPT_GSI) ||
4089 		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
4090 		       hr_dev->caps.wqe_sge_hop_num : 0);
4091 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4092 		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4093 		       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
4094 
4095 	roce_set_field(context->byte_20_smac_sgid_idx,
4096 		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4097 		       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
4098 		       hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
4099 		       0 : hr_dev->caps.wqe_rq_hop_num);
4100 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4101 		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4102 		       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
4103 
4104 	roce_set_field(context->byte_16_buf_ba_pg_sz,
4105 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4106 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
4107 		       hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
4108 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4109 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4110 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
4111 
4112 	roce_set_field(context->byte_16_buf_ba_pg_sz,
4113 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4114 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
4115 		       hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
4116 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4117 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4118 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
4119 
4120 	context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
4121 	qpc_mask->rq_cur_blk_addr = 0;
4122 
4123 	roce_set_field(context->byte_92_srq_info,
4124 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4125 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
4126 		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
4127 	roce_set_field(qpc_mask->byte_92_srq_info,
4128 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4129 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
4130 
4131 	context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
4132 	qpc_mask->rq_nxt_blk_addr = 0;
4133 
4134 	roce_set_field(context->byte_104_rq_sge,
4135 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4136 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
4137 		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
4138 	roce_set_field(qpc_mask->byte_104_rq_sge,
4139 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4140 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
4141 
4142 	roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4143 		       V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
4144 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4145 		       V2_QPC_BYTE_132_TRRL_BA_S, 0);
4146 	context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
4147 	qpc_mask->trrl_ba = 0;
4148 	roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4149 		       V2_QPC_BYTE_140_TRRL_BA_S,
4150 		       (u32)(dma_handle_3 >> (32 + 16 + 4)));
4151 	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4152 		       V2_QPC_BYTE_140_TRRL_BA_S, 0);
4153 
4154 	context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
4155 	qpc_mask->irrl_ba = 0;
4156 	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4157 		       V2_QPC_BYTE_208_IRRL_BA_S,
4158 		       dma_handle_2 >> (32 + 6));
4159 	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4160 		       V2_QPC_BYTE_208_IRRL_BA_S, 0);
4161 
4162 	roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
4163 	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
4164 
4165 	roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4166 		     hr_qp->sq_signal_bits);
4167 	roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4168 		     0);
4169 
4170 	port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4171 
4172 	smac = (u8 *)hr_dev->dev_addr[port];
4173 	/* when dmac equals smac or loop_idc is 1, it should loopback */
4174 	if (ether_addr_equal_unaligned(dmac, smac) ||
4175 	    hr_dev->loop_idc == 0x1) {
4176 		roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
4177 		roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
4178 	}
4179 
4180 	if (attr_mask & IB_QP_DEST_QPN) {
4181 		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
4182 			       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
4183 		roce_set_field(qpc_mask->byte_56_dqpn_err,
4184 			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
4185 	}
4186 
4187 	/* Configure GID index */
4188 	port_num = rdma_ah_get_port_num(&attr->ah_attr);
4189 	roce_set_field(context->byte_20_smac_sgid_idx,
4190 		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4191 		       hns_get_gid_index(hr_dev, port_num - 1,
4192 					 grh->sgid_index));
4193 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4194 		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4195 	memcpy(&(context->dmac), dmac, sizeof(u32));
4196 	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4197 		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
4198 	qpc_mask->dmac = 0;
4199 	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4200 		       V2_QPC_BYTE_52_DMAC_S, 0);
4201 
4202 	/* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
4203 	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4204 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
4205 	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4206 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
4207 
4208 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4209 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4210 			       V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
4211 	else if (attr_mask & IB_QP_PATH_MTU)
4212 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4213 			       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
4214 
4215 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4216 		       V2_QPC_BYTE_24_MTU_S, 0);
4217 
4218 	roce_set_field(context->byte_84_rq_ci_pi,
4219 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4220 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
4221 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4222 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4223 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4224 
4225 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4226 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
4227 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
4228 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4229 		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
4230 	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
4231 		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
4232 	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4233 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
4234 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
4235 
4236 	context->rq_rnr_timer = 0;
4237 	qpc_mask->rq_rnr_timer = 0;
4238 
4239 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
4240 		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
4241 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
4242 		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
4243 
4244 	/* rocee send 2^lp_sgen_ini segs every time */
4245 	roce_set_field(context->byte_168_irrl_idx,
4246 		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
4247 		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
4248 	roce_set_field(qpc_mask->byte_168_irrl_idx,
4249 		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
4250 		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
4251 
4252 	return 0;
4253 }
4254 
4255 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4256 				const struct ib_qp_attr *attr, int attr_mask,
4257 				struct hns_roce_v2_qp_context *context,
4258 				struct hns_roce_v2_qp_context *qpc_mask)
4259 {
4260 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4261 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4262 	struct device *dev = hr_dev->dev;
4263 	u64 sge_cur_blk = 0;
4264 	u64 sq_cur_blk = 0;
4265 	u32 page_size;
4266 	int count;
4267 
4268 	/* Search qp buf's mtts */
4269 	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4270 	if (count < 1) {
4271 		dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
4272 		return -EINVAL;
4273 	}
4274 
4275 	if (hr_qp->sge.offset) {
4276 		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
4277 		count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4278 					  hr_qp->sge.offset / page_size,
4279 					  &sge_cur_blk, 1, NULL);
4280 		if (count < 1) {
4281 			dev_err(dev, "qp(0x%lx) sge pa find failed\n",
4282 				hr_qp->qpn);
4283 			return -EINVAL;
4284 		}
4285 	}
4286 
4287 	/* Not support alternate path and path migration */
4288 	if ((attr_mask & IB_QP_ALT_PATH) ||
4289 	    (attr_mask & IB_QP_PATH_MIG_STATE)) {
4290 		dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
4291 		return -EINVAL;
4292 	}
4293 
4294 	/*
4295 	 * In v2 engine, software pass context and context mask to hardware
4296 	 * when modifying qp. If software need modify some fields in context,
4297 	 * we should set all bits of the relevant fields in context mask to
4298 	 * 0 at the same time, else set them to 0x1.
4299 	 */
4300 	context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
4301 	roce_set_field(context->byte_168_irrl_idx,
4302 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4303 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
4304 		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
4305 	qpc_mask->sq_cur_blk_addr = 0;
4306 	roce_set_field(qpc_mask->byte_168_irrl_idx,
4307 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4308 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
4309 
4310 	context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
4311 		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
4312 		       cpu_to_le32(sge_cur_blk >>
4313 		       PAGE_ADDR_SHIFT) : 0;
4314 	roce_set_field(context->byte_184_irrl_idx,
4315 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4316 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
4317 		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
4318 		       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
4319 		       (sge_cur_blk >>
4320 		       (32 + PAGE_ADDR_SHIFT)) : 0);
4321 	qpc_mask->sq_cur_sge_blk_addr = 0;
4322 	roce_set_field(qpc_mask->byte_184_irrl_idx,
4323 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4324 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
4325 
4326 	context->rx_sq_cur_blk_addr =
4327 		cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
4328 	roce_set_field(context->byte_232_irrl_sge,
4329 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4330 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
4331 		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
4332 	qpc_mask->rx_sq_cur_blk_addr = 0;
4333 	roce_set_field(qpc_mask->byte_232_irrl_sge,
4334 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4335 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
4336 
4337 	/*
4338 	 * Set some fields in context to zero, Because the default values
4339 	 * of all fields in context are zero, we need not set them to 0 again.
4340 	 * but we should set the relevant fields of context mask to 0.
4341 	 */
4342 	roce_set_field(qpc_mask->byte_232_irrl_sge,
4343 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
4344 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
4345 
4346 	roce_set_field(qpc_mask->byte_240_irrl_tail,
4347 		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
4348 		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4349 
4350 	roce_set_field(qpc_mask->byte_248_ack_psn,
4351 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4352 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4353 	roce_set_bit(qpc_mask->byte_248_ack_psn,
4354 		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4355 	roce_set_field(qpc_mask->byte_248_ack_psn,
4356 		       V2_QPC_BYTE_248_IRRL_PSN_M,
4357 		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4358 
4359 	roce_set_field(qpc_mask->byte_240_irrl_tail,
4360 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4361 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4362 
4363 	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4364 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4365 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4366 
4367 	roce_set_bit(qpc_mask->byte_248_ack_psn,
4368 		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4369 
4370 	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4371 		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4372 
4373 	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4374 		       V2_QPC_BYTE_212_LSN_S, 0x100);
4375 	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4376 		       V2_QPC_BYTE_212_LSN_S, 0);
4377 
4378 	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4379 		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4380 
4381 	return 0;
4382 }
4383 
4384 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
4385 					     enum ib_qp_state new_state)
4386 {
4387 
4388 	if ((cur_state != IB_QPS_RESET &&
4389 	    (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
4390 	    ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
4391 	    (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
4392 	    (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
4393 		return true;
4394 
4395 	return false;
4396 
4397 }
4398 
4399 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4400 				const struct ib_qp_attr *attr,
4401 				int attr_mask,
4402 				struct hns_roce_v2_qp_context *context,
4403 				struct hns_roce_v2_qp_context *qpc_mask)
4404 {
4405 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4406 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4407 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4408 	const struct ib_gid_attr *gid_attr = NULL;
4409 	int is_roce_protocol;
4410 	u16 vlan_id = 0xffff;
4411 	bool is_udp = false;
4412 	u8 ib_port;
4413 	u8 hr_port;
4414 	int ret;
4415 
4416 	ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4417 	hr_port = ib_port - 1;
4418 	is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4419 			   rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4420 
4421 	if (is_roce_protocol) {
4422 		gid_attr = attr->ah_attr.grh.sgid_attr;
4423 		ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4424 		if (ret)
4425 			return ret;
4426 
4427 		if (gid_attr)
4428 			is_udp = (gid_attr->gid_type ==
4429 				 IB_GID_TYPE_ROCE_UDP_ENCAP);
4430 	}
4431 
4432 	if (vlan_id < VLAN_N_VID) {
4433 		roce_set_bit(context->byte_76_srqn_op_en,
4434 			     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4435 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4436 			     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4437 		roce_set_bit(context->byte_168_irrl_idx,
4438 			     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4439 		roce_set_bit(qpc_mask->byte_168_irrl_idx,
4440 			     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4441 	}
4442 
4443 	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4444 		       V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
4445 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4446 		       V2_QPC_BYTE_24_VLAN_ID_S, 0);
4447 
4448 	if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4449 		dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
4450 			grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4451 		return -EINVAL;
4452 	}
4453 
4454 	if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4455 		dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
4456 		return -EINVAL;
4457 	}
4458 
4459 	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4460 		       V2_QPC_BYTE_52_UDPSPN_S,
4461 		       is_udp ? 0x12b7 : 0);
4462 
4463 	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4464 		       V2_QPC_BYTE_52_UDPSPN_S, 0);
4465 
4466 	roce_set_field(context->byte_20_smac_sgid_idx,
4467 		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4468 		       grh->sgid_index);
4469 
4470 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4471 		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4472 
4473 	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4474 		       V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4475 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4476 		       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4477 
4478 	if (hr_dev->pci_dev->revision == 0x21 && is_udp)
4479 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4480 			       V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
4481 	else
4482 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4483 			       V2_QPC_BYTE_24_TC_S, grh->traffic_class);
4484 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4485 		       V2_QPC_BYTE_24_TC_S, 0);
4486 	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4487 		       V2_QPC_BYTE_28_FL_S, grh->flow_label);
4488 	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4489 		       V2_QPC_BYTE_28_FL_S, 0);
4490 	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4491 	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4492 	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4493 		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
4494 	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4495 		       V2_QPC_BYTE_28_SL_S, 0);
4496 	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4497 
4498 	return 0;
4499 }
4500 
4501 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4502 				      const struct ib_qp_attr *attr,
4503 				      int attr_mask,
4504 				      enum ib_qp_state cur_state,
4505 				      enum ib_qp_state new_state,
4506 				      struct hns_roce_v2_qp_context *context,
4507 				      struct hns_roce_v2_qp_context *qpc_mask)
4508 {
4509 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4510 	int ret = 0;
4511 
4512 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4513 		memset(qpc_mask, 0, sizeof(*qpc_mask));
4514 		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4515 					qpc_mask);
4516 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4517 		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4518 				       qpc_mask);
4519 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4520 		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4521 					    qpc_mask);
4522 		if (ret)
4523 			goto out;
4524 	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4525 		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4526 					   qpc_mask);
4527 		if (ret)
4528 			goto out;
4529 	} else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
4530 		/* Nothing */
4531 		;
4532 	} else {
4533 		dev_err(hr_dev->dev, "Illegal state for QP!\n");
4534 		ret = -EINVAL;
4535 		goto out;
4536 	}
4537 
4538 out:
4539 	return ret;
4540 }
4541 
4542 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4543 				      const struct ib_qp_attr *attr,
4544 				      int attr_mask,
4545 				      struct hns_roce_v2_qp_context *context,
4546 				      struct hns_roce_v2_qp_context *qpc_mask)
4547 {
4548 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4549 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4550 	int ret = 0;
4551 
4552 	if (attr_mask & IB_QP_AV) {
4553 		ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4554 					   qpc_mask);
4555 		if (ret)
4556 			return ret;
4557 	}
4558 
4559 	if (attr_mask & IB_QP_TIMEOUT) {
4560 		if (attr->timeout < 31) {
4561 			roce_set_field(context->byte_28_at_fl,
4562 				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4563 				       attr->timeout);
4564 			roce_set_field(qpc_mask->byte_28_at_fl,
4565 				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4566 				       0);
4567 		} else {
4568 			dev_warn(hr_dev->dev,
4569 				 "Local ACK timeout shall be 0 to 30.\n");
4570 		}
4571 	}
4572 
4573 	if (attr_mask & IB_QP_RETRY_CNT) {
4574 		roce_set_field(context->byte_212_lsn,
4575 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4576 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4577 			       attr->retry_cnt);
4578 		roce_set_field(qpc_mask->byte_212_lsn,
4579 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4580 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4581 
4582 		roce_set_field(context->byte_212_lsn,
4583 			       V2_QPC_BYTE_212_RETRY_CNT_M,
4584 			       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
4585 		roce_set_field(qpc_mask->byte_212_lsn,
4586 			       V2_QPC_BYTE_212_RETRY_CNT_M,
4587 			       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4588 	}
4589 
4590 	if (attr_mask & IB_QP_RNR_RETRY) {
4591 		roce_set_field(context->byte_244_rnr_rxack,
4592 			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4593 			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4594 		roce_set_field(qpc_mask->byte_244_rnr_rxack,
4595 			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4596 			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4597 
4598 		roce_set_field(context->byte_244_rnr_rxack,
4599 			       V2_QPC_BYTE_244_RNR_CNT_M,
4600 			       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4601 		roce_set_field(qpc_mask->byte_244_rnr_rxack,
4602 			       V2_QPC_BYTE_244_RNR_CNT_M,
4603 			       V2_QPC_BYTE_244_RNR_CNT_S, 0);
4604 	}
4605 
4606 	/* RC&UC&UD required attr */
4607 	if (attr_mask & IB_QP_SQ_PSN) {
4608 		roce_set_field(context->byte_172_sq_psn,
4609 			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4610 			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4611 		roce_set_field(qpc_mask->byte_172_sq_psn,
4612 			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4613 			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4614 
4615 		roce_set_field(context->byte_196_sq_psn,
4616 			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4617 			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4618 		roce_set_field(qpc_mask->byte_196_sq_psn,
4619 			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4620 			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4621 
4622 		roce_set_field(context->byte_220_retry_psn_msn,
4623 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4624 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4625 		roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4626 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4627 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4628 
4629 		roce_set_field(context->byte_224_retry_msg,
4630 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4631 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4632 			       attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4633 		roce_set_field(qpc_mask->byte_224_retry_msg,
4634 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4635 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4636 
4637 		roce_set_field(context->byte_224_retry_msg,
4638 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4639 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4640 			       attr->sq_psn);
4641 		roce_set_field(qpc_mask->byte_224_retry_msg,
4642 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4643 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4644 
4645 		roce_set_field(context->byte_244_rnr_rxack,
4646 			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4647 			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4648 		roce_set_field(qpc_mask->byte_244_rnr_rxack,
4649 			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4650 			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4651 	}
4652 
4653 	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4654 	     attr->max_dest_rd_atomic) {
4655 		roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4656 			       V2_QPC_BYTE_140_RR_MAX_S,
4657 			       fls(attr->max_dest_rd_atomic - 1));
4658 		roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4659 			       V2_QPC_BYTE_140_RR_MAX_S, 0);
4660 	}
4661 
4662 	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4663 		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4664 			       V2_QPC_BYTE_208_SR_MAX_S,
4665 			       fls(attr->max_rd_atomic - 1));
4666 		roce_set_field(qpc_mask->byte_208_irrl,
4667 			       V2_QPC_BYTE_208_SR_MAX_M,
4668 			       V2_QPC_BYTE_208_SR_MAX_S, 0);
4669 	}
4670 
4671 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4672 		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4673 
4674 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4675 		roce_set_field(context->byte_80_rnr_rx_cqn,
4676 			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4677 			       V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4678 			       attr->min_rnr_timer);
4679 		roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4680 			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4681 			       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4682 	}
4683 
4684 	/* RC&UC required attr */
4685 	if (attr_mask & IB_QP_RQ_PSN) {
4686 		roce_set_field(context->byte_108_rx_reqepsn,
4687 			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4688 			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4689 		roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4690 			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4691 			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4692 
4693 		roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4694 			       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4695 		roce_set_field(qpc_mask->byte_152_raq,
4696 			       V2_QPC_BYTE_152_RAQ_PSN_M,
4697 			       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4698 	}
4699 
4700 	if (attr_mask & IB_QP_QKEY) {
4701 		context->qkey_xrcd = cpu_to_le32(attr->qkey);
4702 		qpc_mask->qkey_xrcd = 0;
4703 		hr_qp->qkey = attr->qkey;
4704 	}
4705 
4706 	return ret;
4707 }
4708 
4709 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4710 					  const struct ib_qp_attr *attr,
4711 					  int attr_mask)
4712 {
4713 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4714 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4715 
4716 	if (attr_mask & IB_QP_ACCESS_FLAGS)
4717 		hr_qp->atomic_rd_en = attr->qp_access_flags;
4718 
4719 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4720 		hr_qp->resp_depth = attr->max_dest_rd_atomic;
4721 	if (attr_mask & IB_QP_PORT) {
4722 		hr_qp->port = attr->port_num - 1;
4723 		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4724 	}
4725 }
4726 
4727 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4728 				 const struct ib_qp_attr *attr,
4729 				 int attr_mask, enum ib_qp_state cur_state,
4730 				 enum ib_qp_state new_state)
4731 {
4732 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4733 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4734 	struct hns_roce_v2_qp_context ctx[2];
4735 	struct hns_roce_v2_qp_context *context = ctx;
4736 	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4737 	struct device *dev = hr_dev->dev;
4738 	int ret;
4739 
4740 	/*
4741 	 * In v2 engine, software pass context and context mask to hardware
4742 	 * when modifying qp. If software need modify some fields in context,
4743 	 * we should set all bits of the relevant fields in context mask to
4744 	 * 0 at the same time, else set them to 0x1.
4745 	 */
4746 	memset(context, 0, sizeof(*context));
4747 	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
4748 	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4749 					 new_state, context, qpc_mask);
4750 	if (ret)
4751 		goto out;
4752 
4753 	/* When QP state is err, SQ and RQ WQE should be flushed */
4754 	if (new_state == IB_QPS_ERR) {
4755 		roce_set_field(context->byte_160_sq_ci_pi,
4756 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4757 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4758 			       hr_qp->sq.head);
4759 		roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4760 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4761 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4762 
4763 		if (!ibqp->srq) {
4764 			roce_set_field(context->byte_84_rq_ci_pi,
4765 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4766 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4767 			       hr_qp->rq.head);
4768 			roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4769 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4770 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4771 		}
4772 	}
4773 
4774 	/* Configure the optional fields */
4775 	ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4776 					 qpc_mask);
4777 	if (ret)
4778 		goto out;
4779 
4780 	roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4781 		     ibqp->srq ? 1 : 0);
4782 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4783 		     V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4784 
4785 	/* Every status migrate must change state */
4786 	roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4787 		       V2_QPC_BYTE_60_QP_ST_S, new_state);
4788 	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4789 		       V2_QPC_BYTE_60_QP_ST_S, 0);
4790 
4791 	/* SW pass context to HW */
4792 	ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
4793 	if (ret) {
4794 		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
4795 		goto out;
4796 	}
4797 
4798 	hr_qp->state = new_state;
4799 
4800 	hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4801 
4802 	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4803 		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4804 				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4805 		if (ibqp->send_cq != ibqp->recv_cq)
4806 			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4807 					     hr_qp->qpn, NULL);
4808 
4809 		hr_qp->rq.head = 0;
4810 		hr_qp->rq.tail = 0;
4811 		hr_qp->sq.head = 0;
4812 		hr_qp->sq.tail = 0;
4813 		hr_qp->next_sge = 0;
4814 		if (hr_qp->rq.wqe_cnt)
4815 			*hr_qp->rdb.db_record = 0;
4816 	}
4817 
4818 out:
4819 	return ret;
4820 }
4821 
4822 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4823 {
4824 	switch (state) {
4825 	case HNS_ROCE_QP_ST_RST:	return IB_QPS_RESET;
4826 	case HNS_ROCE_QP_ST_INIT:	return IB_QPS_INIT;
4827 	case HNS_ROCE_QP_ST_RTR:	return IB_QPS_RTR;
4828 	case HNS_ROCE_QP_ST_RTS:	return IB_QPS_RTS;
4829 	case HNS_ROCE_QP_ST_SQ_DRAINING:
4830 	case HNS_ROCE_QP_ST_SQD:	return IB_QPS_SQD;
4831 	case HNS_ROCE_QP_ST_SQER:	return IB_QPS_SQE;
4832 	case HNS_ROCE_QP_ST_ERR:	return IB_QPS_ERR;
4833 	default:			return -1;
4834 	}
4835 }
4836 
4837 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4838 				 struct hns_roce_qp *hr_qp,
4839 				 struct hns_roce_v2_qp_context *hr_context)
4840 {
4841 	struct hns_roce_cmd_mailbox *mailbox;
4842 	int ret;
4843 
4844 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4845 	if (IS_ERR(mailbox))
4846 		return PTR_ERR(mailbox);
4847 
4848 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4849 				HNS_ROCE_CMD_QUERY_QPC,
4850 				HNS_ROCE_CMD_TIMEOUT_MSECS);
4851 	if (ret) {
4852 		dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4853 		goto out;
4854 	}
4855 
4856 	memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4857 
4858 out:
4859 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4860 	return ret;
4861 }
4862 
4863 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4864 				int qp_attr_mask,
4865 				struct ib_qp_init_attr *qp_init_attr)
4866 {
4867 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4868 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4869 	struct hns_roce_v2_qp_context context = {};
4870 	struct device *dev = hr_dev->dev;
4871 	int tmp_qp_state;
4872 	int state;
4873 	int ret;
4874 
4875 	memset(qp_attr, 0, sizeof(*qp_attr));
4876 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4877 
4878 	mutex_lock(&hr_qp->mutex);
4879 
4880 	if (hr_qp->state == IB_QPS_RESET) {
4881 		qp_attr->qp_state = IB_QPS_RESET;
4882 		ret = 0;
4883 		goto done;
4884 	}
4885 
4886 	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4887 	if (ret) {
4888 		dev_err(dev, "query qpc error\n");
4889 		ret = -EINVAL;
4890 		goto out;
4891 	}
4892 
4893 	state = roce_get_field(context.byte_60_qpst_tempid,
4894 			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4895 	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4896 	if (tmp_qp_state == -1) {
4897 		dev_err(dev, "Illegal ib_qp_state\n");
4898 		ret = -EINVAL;
4899 		goto out;
4900 	}
4901 	hr_qp->state = (u8)tmp_qp_state;
4902 	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4903 	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
4904 							V2_QPC_BYTE_24_MTU_M,
4905 							V2_QPC_BYTE_24_MTU_S);
4906 	qp_attr->path_mig_state = IB_MIG_ARMED;
4907 	qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
4908 	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4909 		qp_attr->qkey = V2_QKEY_VAL;
4910 
4911 	qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
4912 					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4913 					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4914 	qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
4915 					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4916 					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4917 	qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
4918 						  V2_QPC_BYTE_56_DQPN_M,
4919 						  V2_QPC_BYTE_56_DQPN_S);
4920 	qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
4921 				    V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4922 				    ((roce_get_bit(context.byte_76_srqn_op_en,
4923 				    V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4924 				    ((roce_get_bit(context.byte_76_srqn_op_en,
4925 				    V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4926 
4927 	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4928 	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
4929 		struct ib_global_route *grh =
4930 				rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4931 
4932 		rdma_ah_set_sl(&qp_attr->ah_attr,
4933 			       roce_get_field(context.byte_28_at_fl,
4934 					      V2_QPC_BYTE_28_SL_M,
4935 					      V2_QPC_BYTE_28_SL_S));
4936 		grh->flow_label = roce_get_field(context.byte_28_at_fl,
4937 						 V2_QPC_BYTE_28_FL_M,
4938 						 V2_QPC_BYTE_28_FL_S);
4939 		grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
4940 						 V2_QPC_BYTE_20_SGID_IDX_M,
4941 						 V2_QPC_BYTE_20_SGID_IDX_S);
4942 		grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
4943 						V2_QPC_BYTE_24_HOP_LIMIT_M,
4944 						V2_QPC_BYTE_24_HOP_LIMIT_S);
4945 		grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
4946 						    V2_QPC_BYTE_24_TC_M,
4947 						    V2_QPC_BYTE_24_TC_S);
4948 
4949 		memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
4950 	}
4951 
4952 	qp_attr->port_num = hr_qp->port + 1;
4953 	qp_attr->sq_draining = 0;
4954 	qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
4955 						     V2_QPC_BYTE_208_SR_MAX_M,
4956 						     V2_QPC_BYTE_208_SR_MAX_S);
4957 	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
4958 						     V2_QPC_BYTE_140_RR_MAX_M,
4959 						     V2_QPC_BYTE_140_RR_MAX_S);
4960 	qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
4961 						 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4962 						 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4963 	qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
4964 					      V2_QPC_BYTE_28_AT_M,
4965 					      V2_QPC_BYTE_28_AT_S);
4966 	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4967 					    V2_QPC_BYTE_212_RETRY_CNT_M,
4968 					    V2_QPC_BYTE_212_RETRY_CNT_S);
4969 	qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
4970 
4971 done:
4972 	qp_attr->cur_qp_state = qp_attr->qp_state;
4973 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4974 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4975 
4976 	if (!ibqp->uobject) {
4977 		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4978 		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4979 	} else {
4980 		qp_attr->cap.max_send_wr = 0;
4981 		qp_attr->cap.max_send_sge = 0;
4982 	}
4983 
4984 	qp_init_attr->cap = qp_attr->cap;
4985 
4986 out:
4987 	mutex_unlock(&hr_qp->mutex);
4988 	return ret;
4989 }
4990 
4991 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4992 					 struct hns_roce_qp *hr_qp,
4993 					 struct ib_udata *udata)
4994 {
4995 	struct hns_roce_cq *send_cq, *recv_cq;
4996 	struct ib_device *ibdev = &hr_dev->ib_dev;
4997 	unsigned long flags;
4998 	int ret = 0;
4999 
5000 	if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
5001 		/* Modify qp to reset before destroying qp */
5002 		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5003 					    hr_qp->state, IB_QPS_RESET);
5004 		if (ret)
5005 			ibdev_err(ibdev, "modify QP to Reset failed.\n");
5006 	}
5007 
5008 	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5009 	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5010 
5011 	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5012 	hns_roce_lock_cqs(send_cq, recv_cq);
5013 
5014 	list_del(&hr_qp->node);
5015 	list_del(&hr_qp->sq_node);
5016 	list_del(&hr_qp->rq_node);
5017 
5018 	if (!udata) {
5019 		if (recv_cq)
5020 			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5021 					       (hr_qp->ibqp.srq ?
5022 						to_hr_srq(hr_qp->ibqp.srq) :
5023 						NULL));
5024 
5025 		if (send_cq && send_cq != recv_cq)
5026 			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5027 
5028 	}
5029 
5030 	hns_roce_qp_remove(hr_dev, hr_qp);
5031 
5032 	hns_roce_unlock_cqs(send_cq, recv_cq);
5033 	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5034 
5035 	hns_roce_qp_free(hr_dev, hr_qp);
5036 
5037 	/* Not special_QP, free their QPN */
5038 	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
5039 	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
5040 	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
5041 		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
5042 
5043 	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
5044 
5045 	if (udata) {
5046 		struct hns_roce_ucontext *context =
5047 			rdma_udata_to_drv_context(
5048 				udata,
5049 				struct hns_roce_ucontext,
5050 				ibucontext);
5051 
5052 		if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
5053 			hns_roce_db_unmap_user(context, &hr_qp->sdb);
5054 
5055 		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
5056 			hns_roce_db_unmap_user(context, &hr_qp->rdb);
5057 	} else {
5058 		kfree(hr_qp->sq.wrid);
5059 		kfree(hr_qp->rq.wrid);
5060 		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
5061 		if (hr_qp->rq.wqe_cnt)
5062 			hns_roce_free_db(hr_dev, &hr_qp->rdb);
5063 	}
5064 	ib_umem_release(hr_qp->umem);
5065 
5066 	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
5067 	     hr_qp->rq.wqe_cnt) {
5068 		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
5069 		kfree(hr_qp->rq_inl_buf.wqe_list);
5070 	}
5071 
5072 	return ret;
5073 }
5074 
5075 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5076 {
5077 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5078 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5079 	int ret;
5080 
5081 	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5082 	if (ret)
5083 		ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
5084 			  hr_qp->qpn, ret);
5085 
5086 	kfree(hr_qp);
5087 
5088 	return 0;
5089 }
5090 
5091 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5092 						struct hns_roce_qp *hr_qp)
5093 {
5094 	struct hns_roce_sccc_clr_done *resp;
5095 	struct hns_roce_sccc_clr *clr;
5096 	struct hns_roce_cmq_desc desc;
5097 	int ret, i;
5098 
5099 	mutex_lock(&hr_dev->qp_table.scc_mutex);
5100 
5101 	/* set scc ctx clear done flag */
5102 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5103 	ret =  hns_roce_cmq_send(hr_dev, &desc, 1);
5104 	if (ret) {
5105 		dev_err(hr_dev->dev, "Reset SCC ctx  failed(%d)\n", ret);
5106 		goto out;
5107 	}
5108 
5109 	/* clear scc context */
5110 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5111 	clr = (struct hns_roce_sccc_clr *)desc.data;
5112 	clr->qpn = cpu_to_le32(hr_qp->qpn);
5113 	ret =  hns_roce_cmq_send(hr_dev, &desc, 1);
5114 	if (ret) {
5115 		dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
5116 		goto out;
5117 	}
5118 
5119 	/* query scc context clear is done or not */
5120 	resp = (struct hns_roce_sccc_clr_done *)desc.data;
5121 	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5122 		hns_roce_cmq_setup_basic_desc(&desc,
5123 					      HNS_ROCE_OPC_QUERY_SCCC, true);
5124 		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5125 		if (ret) {
5126 			dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
5127 			goto out;
5128 		}
5129 
5130 		if (resp->clr_done)
5131 			goto out;
5132 
5133 		msleep(20);
5134 	}
5135 
5136 	dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
5137 	ret = -ETIMEDOUT;
5138 
5139 out:
5140 	mutex_unlock(&hr_dev->qp_table.scc_mutex);
5141 	return ret;
5142 }
5143 
5144 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5145 {
5146 	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5147 	struct hns_roce_v2_cq_context *cq_context;
5148 	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5149 	struct hns_roce_v2_cq_context *cqc_mask;
5150 	struct hns_roce_cmd_mailbox *mailbox;
5151 	int ret;
5152 
5153 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5154 	if (IS_ERR(mailbox))
5155 		return PTR_ERR(mailbox);
5156 
5157 	cq_context = mailbox->buf;
5158 	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5159 
5160 	memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5161 
5162 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5163 		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5164 		       cq_count);
5165 	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5166 		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5167 		       0);
5168 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5169 		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5170 		       cq_period);
5171 	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5172 		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5173 		       0);
5174 
5175 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5176 				HNS_ROCE_CMD_MODIFY_CQC,
5177 				HNS_ROCE_CMD_TIMEOUT_MSECS);
5178 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5179 	if (ret)
5180 		dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
5181 
5182 	return ret;
5183 }
5184 
5185 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
5186 {
5187 	struct hns_roce_qp *hr_qp;
5188 	struct ib_qp_attr attr;
5189 	int attr_mask;
5190 	int ret;
5191 
5192 	hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
5193 	if (!hr_qp) {
5194 		dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
5195 		return;
5196 	}
5197 
5198 	if (hr_qp->ibqp.uobject) {
5199 		if (hr_qp->sdb_en == 1) {
5200 			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
5201 			if (hr_qp->rdb_en == 1)
5202 				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
5203 		} else {
5204 			dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
5205 			return;
5206 		}
5207 	}
5208 
5209 	attr_mask = IB_QP_STATE;
5210 	attr.qp_state = IB_QPS_ERR;
5211 	ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
5212 				    hr_qp->state, IB_QPS_ERR);
5213 	if (ret)
5214 		dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
5215 			qpn);
5216 }
5217 
5218 static void hns_roce_irq_work_handle(struct work_struct *work)
5219 {
5220 	struct hns_roce_work *irq_work =
5221 				container_of(work, struct hns_roce_work, work);
5222 	struct device *dev = irq_work->hr_dev->dev;
5223 	u32 qpn = irq_work->qpn;
5224 	u32 cqn = irq_work->cqn;
5225 
5226 	switch (irq_work->event_type) {
5227 	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5228 		dev_info(dev, "Path migrated succeeded.\n");
5229 		break;
5230 	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5231 		dev_warn(dev, "Path migration failed.\n");
5232 		break;
5233 	case HNS_ROCE_EVENT_TYPE_COMM_EST:
5234 		break;
5235 	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5236 		dev_warn(dev, "Send queue drained.\n");
5237 		break;
5238 	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5239 		dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
5240 			qpn, irq_work->sub_type);
5241 		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
5242 		break;
5243 	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5244 		dev_err(dev, "Invalid request local work queue 0x%x error.\n",
5245 			qpn);
5246 		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
5247 		break;
5248 	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5249 		dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
5250 			qpn, irq_work->sub_type);
5251 		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
5252 		break;
5253 	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5254 		dev_warn(dev, "SRQ limit reach.\n");
5255 		break;
5256 	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5257 		dev_warn(dev, "SRQ last wqe reach.\n");
5258 		break;
5259 	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5260 		dev_err(dev, "SRQ catas error.\n");
5261 		break;
5262 	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5263 		dev_err(dev, "CQ 0x%x access err.\n", cqn);
5264 		break;
5265 	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5266 		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
5267 		break;
5268 	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5269 		dev_warn(dev, "DB overflow.\n");
5270 		break;
5271 	case HNS_ROCE_EVENT_TYPE_FLR:
5272 		dev_warn(dev, "Function level reset.\n");
5273 		break;
5274 	default:
5275 		break;
5276 	}
5277 
5278 	kfree(irq_work);
5279 }
5280 
5281 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5282 				      struct hns_roce_eq *eq,
5283 				      u32 qpn, u32 cqn)
5284 {
5285 	struct hns_roce_work *irq_work;
5286 
5287 	irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5288 	if (!irq_work)
5289 		return;
5290 
5291 	INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5292 	irq_work->hr_dev = hr_dev;
5293 	irq_work->qpn = qpn;
5294 	irq_work->cqn = cqn;
5295 	irq_work->event_type = eq->event_type;
5296 	irq_work->sub_type = eq->sub_type;
5297 	queue_work(hr_dev->irq_workq, &(irq_work->work));
5298 }
5299 
5300 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
5301 {
5302 	struct hns_roce_dev *hr_dev = eq->hr_dev;
5303 	__le32 doorbell[2] = {};
5304 
5305 	if (eq->type_flag == HNS_ROCE_AEQ) {
5306 		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5307 			       HNS_ROCE_V2_EQ_DB_CMD_S,
5308 			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5309 			       HNS_ROCE_EQ_DB_CMD_AEQ :
5310 			       HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5311 	} else {
5312 		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
5313 			       HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
5314 
5315 		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5316 			       HNS_ROCE_V2_EQ_DB_CMD_S,
5317 			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5318 			       HNS_ROCE_EQ_DB_CMD_CEQ :
5319 			       HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5320 	}
5321 
5322 	roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
5323 		       HNS_ROCE_V2_EQ_DB_PARA_S,
5324 		       (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
5325 
5326 	hns_roce_write64(hr_dev, doorbell, eq->doorbell);
5327 }
5328 
5329 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
5330 {
5331 	u32 buf_chk_sz;
5332 	unsigned long off;
5333 
5334 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5335 	off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
5336 
5337 	return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
5338 		off % buf_chk_sz);
5339 }
5340 
5341 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
5342 {
5343 	u32 buf_chk_sz;
5344 	unsigned long off;
5345 
5346 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5347 
5348 	off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
5349 
5350 	if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5351 		return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
5352 			off % buf_chk_sz);
5353 	else
5354 		return (struct hns_roce_aeqe *)((u8 *)
5355 			(eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
5356 }
5357 
5358 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5359 {
5360 	struct hns_roce_aeqe *aeqe;
5361 
5362 	if (!eq->hop_num)
5363 		aeqe = get_aeqe_v2(eq, eq->cons_index);
5364 	else
5365 		aeqe = mhop_get_aeqe(eq, eq->cons_index);
5366 
5367 	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5368 		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5369 }
5370 
5371 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5372 			       struct hns_roce_eq *eq)
5373 {
5374 	struct device *dev = hr_dev->dev;
5375 	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5376 	int aeqe_found = 0;
5377 	int event_type;
5378 	int sub_type;
5379 	u32 srqn;
5380 	u32 qpn;
5381 	u32 cqn;
5382 
5383 	while (aeqe) {
5384 		/* Make sure we read AEQ entry after we have checked the
5385 		 * ownership bit
5386 		 */
5387 		dma_rmb();
5388 
5389 		event_type = roce_get_field(aeqe->asyn,
5390 					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5391 					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5392 		sub_type = roce_get_field(aeqe->asyn,
5393 					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5394 					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5395 		qpn = roce_get_field(aeqe->event.qp_event.qp,
5396 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5397 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5398 		cqn = roce_get_field(aeqe->event.cq_event.cq,
5399 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5400 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5401 		srqn = roce_get_field(aeqe->event.srq_event.srq,
5402 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5403 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5404 
5405 		switch (event_type) {
5406 		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5407 		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5408 		case HNS_ROCE_EVENT_TYPE_COMM_EST:
5409 		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5410 		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5411 		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5412 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5413 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5414 			hns_roce_qp_event(hr_dev, qpn, event_type);
5415 			break;
5416 		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5417 		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5418 			hns_roce_srq_event(hr_dev, srqn, event_type);
5419 			break;
5420 		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5421 		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5422 			hns_roce_cq_event(hr_dev, cqn, event_type);
5423 			break;
5424 		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5425 			break;
5426 		case HNS_ROCE_EVENT_TYPE_MB:
5427 			hns_roce_cmd_event(hr_dev,
5428 					le16_to_cpu(aeqe->event.cmd.token),
5429 					aeqe->event.cmd.status,
5430 					le64_to_cpu(aeqe->event.cmd.out_param));
5431 			break;
5432 		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
5433 			break;
5434 		case HNS_ROCE_EVENT_TYPE_FLR:
5435 			break;
5436 		default:
5437 			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5438 				event_type, eq->eqn, eq->cons_index);
5439 			break;
5440 		}
5441 
5442 		eq->event_type = event_type;
5443 		eq->sub_type = sub_type;
5444 		++eq->cons_index;
5445 		aeqe_found = 1;
5446 
5447 		if (eq->cons_index > (2 * eq->entries - 1))
5448 			eq->cons_index = 0;
5449 
5450 		hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
5451 
5452 		aeqe = next_aeqe_sw_v2(eq);
5453 	}
5454 
5455 	set_eq_cons_index_v2(eq);
5456 	return aeqe_found;
5457 }
5458 
5459 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
5460 {
5461 	u32 buf_chk_sz;
5462 	unsigned long off;
5463 
5464 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5465 	off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5466 
5467 	return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
5468 		off % buf_chk_sz);
5469 }
5470 
5471 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
5472 {
5473 	u32 buf_chk_sz;
5474 	unsigned long off;
5475 
5476 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5477 
5478 	off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5479 
5480 	if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5481 		return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
5482 			off % buf_chk_sz);
5483 	else
5484 		return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
5485 			buf_chk_sz]) + off % buf_chk_sz);
5486 }
5487 
5488 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5489 {
5490 	struct hns_roce_ceqe *ceqe;
5491 
5492 	if (!eq->hop_num)
5493 		ceqe = get_ceqe_v2(eq, eq->cons_index);
5494 	else
5495 		ceqe = mhop_get_ceqe(eq, eq->cons_index);
5496 
5497 	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5498 		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5499 }
5500 
5501 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5502 			       struct hns_roce_eq *eq)
5503 {
5504 	struct device *dev = hr_dev->dev;
5505 	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5506 	int ceqe_found = 0;
5507 	u32 cqn;
5508 
5509 	while (ceqe) {
5510 		/* Make sure we read CEQ entry after we have checked the
5511 		 * ownership bit
5512 		 */
5513 		dma_rmb();
5514 
5515 		cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
5516 				     HNS_ROCE_V2_CEQE_COMP_CQN_S);
5517 
5518 		hns_roce_cq_completion(hr_dev, cqn);
5519 
5520 		++eq->cons_index;
5521 		ceqe_found = 1;
5522 
5523 		if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
5524 			dev_warn(dev, "cons_index overflow, set back to 0.\n");
5525 			eq->cons_index = 0;
5526 		}
5527 
5528 		ceqe = next_ceqe_sw_v2(eq);
5529 	}
5530 
5531 	set_eq_cons_index_v2(eq);
5532 
5533 	return ceqe_found;
5534 }
5535 
5536 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5537 {
5538 	struct hns_roce_eq *eq = eq_ptr;
5539 	struct hns_roce_dev *hr_dev = eq->hr_dev;
5540 	int int_work = 0;
5541 
5542 	if (eq->type_flag == HNS_ROCE_CEQ)
5543 		/* Completion event interrupt */
5544 		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5545 	else
5546 		/* Asychronous event interrupt */
5547 		int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5548 
5549 	return IRQ_RETVAL(int_work);
5550 }
5551 
5552 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5553 {
5554 	struct hns_roce_dev *hr_dev = dev_id;
5555 	struct device *dev = hr_dev->dev;
5556 	int int_work = 0;
5557 	u32 int_st;
5558 	u32 int_en;
5559 
5560 	/* Abnormal interrupt */
5561 	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5562 	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5563 
5564 	if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5565 		struct pci_dev *pdev = hr_dev->pci_dev;
5566 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5567 		const struct hnae3_ae_ops *ops = ae_dev->ops;
5568 
5569 		dev_err(dev, "AEQ overflow!\n");
5570 
5571 		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5572 		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5573 
5574 		/* Set reset level for reset_event() */
5575 		if (ops->set_default_reset_request)
5576 			ops->set_default_reset_request(ae_dev,
5577 						       HNAE3_FUNC_RESET);
5578 		if (ops->reset_event)
5579 			ops->reset_event(pdev, NULL);
5580 
5581 		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5582 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5583 
5584 		int_work = 1;
5585 	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5586 		dev_err(dev, "BUS ERR!\n");
5587 
5588 		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5589 		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5590 
5591 		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5592 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5593 
5594 		int_work = 1;
5595 	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5596 		dev_err(dev, "OTHER ERR!\n");
5597 
5598 		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5599 		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5600 
5601 		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5602 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5603 
5604 		int_work = 1;
5605 	} else
5606 		dev_err(dev, "There is no abnormal irq found!\n");
5607 
5608 	return IRQ_RETVAL(int_work);
5609 }
5610 
5611 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5612 					int eq_num, int enable_flag)
5613 {
5614 	int i;
5615 
5616 	if (enable_flag == EQ_ENABLE) {
5617 		for (i = 0; i < eq_num; i++)
5618 			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5619 				   i * EQ_REG_OFFSET,
5620 				   HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5621 
5622 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5623 			   HNS_ROCE_V2_VF_ABN_INT_EN_M);
5624 		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5625 			   HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5626 	} else {
5627 		for (i = 0; i < eq_num; i++)
5628 			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5629 				   i * EQ_REG_OFFSET,
5630 				   HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5631 
5632 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5633 			   HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5634 		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5635 			   HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5636 	}
5637 }
5638 
5639 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5640 {
5641 	struct device *dev = hr_dev->dev;
5642 	int ret;
5643 
5644 	if (eqn < hr_dev->caps.num_comp_vectors)
5645 		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5646 					0, HNS_ROCE_CMD_DESTROY_CEQC,
5647 					HNS_ROCE_CMD_TIMEOUT_MSECS);
5648 	else
5649 		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5650 					0, HNS_ROCE_CMD_DESTROY_AEQC,
5651 					HNS_ROCE_CMD_TIMEOUT_MSECS);
5652 	if (ret)
5653 		dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5654 }
5655 
5656 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
5657 				  struct hns_roce_eq *eq)
5658 {
5659 	struct device *dev = hr_dev->dev;
5660 	u64 idx;
5661 	u64 size;
5662 	u32 buf_chk_sz;
5663 	u32 bt_chk_sz;
5664 	u32 mhop_num;
5665 	int eqe_alloc;
5666 	int i = 0;
5667 	int j = 0;
5668 
5669 	mhop_num = hr_dev->caps.eqe_hop_num;
5670 	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5671 	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5672 
5673 	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5674 		dma_free_coherent(dev, (unsigned int)(eq->entries *
5675 				  eq->eqe_size), eq->bt_l0, eq->l0_dma);
5676 		return;
5677 	}
5678 
5679 	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5680 	if (mhop_num == 1) {
5681 		for (i = 0; i < eq->l0_last_num; i++) {
5682 			if (i == eq->l0_last_num - 1) {
5683 				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5684 				size = (eq->entries - eqe_alloc) * eq->eqe_size;
5685 				dma_free_coherent(dev, size, eq->buf[i],
5686 						  eq->buf_dma[i]);
5687 				break;
5688 			}
5689 			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5690 					  eq->buf_dma[i]);
5691 		}
5692 	} else if (mhop_num == 2) {
5693 		for (i = 0; i < eq->l0_last_num; i++) {
5694 			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5695 					  eq->l1_dma[i]);
5696 
5697 			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5698 				idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
5699 				if ((i == eq->l0_last_num - 1)
5700 				     && j == eq->l1_last_num - 1) {
5701 					eqe_alloc = (buf_chk_sz / eq->eqe_size)
5702 						    * idx;
5703 					size = (eq->entries - eqe_alloc)
5704 						* eq->eqe_size;
5705 					dma_free_coherent(dev, size,
5706 							  eq->buf[idx],
5707 							  eq->buf_dma[idx]);
5708 					break;
5709 				}
5710 				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5711 						  eq->buf_dma[idx]);
5712 			}
5713 		}
5714 	}
5715 	kfree(eq->buf_dma);
5716 	kfree(eq->buf);
5717 	kfree(eq->l1_dma);
5718 	kfree(eq->bt_l1);
5719 	eq->buf_dma = NULL;
5720 	eq->buf = NULL;
5721 	eq->l1_dma = NULL;
5722 	eq->bt_l1 = NULL;
5723 }
5724 
5725 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5726 				struct hns_roce_eq *eq)
5727 {
5728 	u32 buf_chk_sz;
5729 
5730 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5731 
5732 	if (hr_dev->caps.eqe_hop_num) {
5733 		hns_roce_mhop_free_eq(hr_dev, eq);
5734 		return;
5735 	}
5736 
5737 	dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
5738 			  eq->buf_list->map);
5739 	kfree(eq->buf_list);
5740 }
5741 
5742 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
5743 				struct hns_roce_eq *eq,
5744 				void *mb_buf)
5745 {
5746 	struct hns_roce_eq_context *eqc;
5747 
5748 	eqc = mb_buf;
5749 	memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5750 
5751 	/* init eqc */
5752 	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5753 	eq->hop_num = hr_dev->caps.eqe_hop_num;
5754 	eq->cons_index = 0;
5755 	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5756 	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5757 	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5758 	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
5759 	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
5760 	eq->shift = ilog2((unsigned int)eq->entries);
5761 
5762 	if (!eq->hop_num)
5763 		eq->eqe_ba = eq->buf_list->map;
5764 	else
5765 		eq->eqe_ba = eq->l0_dma;
5766 
5767 	/* set eqc state */
5768 	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
5769 		       HNS_ROCE_V2_EQ_STATE_VALID);
5770 
5771 	/* set eqe hop num */
5772 	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
5773 		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5774 
5775 	/* set eqc over_ignore */
5776 	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
5777 		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5778 
5779 	/* set eqc coalesce */
5780 	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
5781 		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5782 
5783 	/* set eqc arm_state */
5784 	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
5785 		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5786 
5787 	/* set eqn */
5788 	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
5789 		       eq->eqn);
5790 
5791 	/* set eqe_cnt */
5792 	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
5793 		       HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
5794 
5795 	/* set eqe_ba_pg_sz */
5796 	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
5797 		       HNS_ROCE_EQC_BA_PG_SZ_S,
5798 		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
5799 
5800 	/* set eqe_buf_pg_sz */
5801 	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
5802 		       HNS_ROCE_EQC_BUF_PG_SZ_S,
5803 		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
5804 
5805 	/* set eq_producer_idx */
5806 	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
5807 		       HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
5808 
5809 	/* set eq_max_cnt */
5810 	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
5811 		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5812 
5813 	/* set eq_period */
5814 	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
5815 		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5816 
5817 	/* set eqe_report_timer */
5818 	roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
5819 		       HNS_ROCE_EQC_REPORT_TIMER_S,
5820 		       HNS_ROCE_EQ_INIT_REPORT_TIMER);
5821 
5822 	/* set eqe_ba [34:3] */
5823 	roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
5824 		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
5825 
5826 	/* set eqe_ba [64:35] */
5827 	roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
5828 		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
5829 
5830 	/* set eq shift */
5831 	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
5832 		       eq->shift);
5833 
5834 	/* set eq MSI_IDX */
5835 	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
5836 		       HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
5837 
5838 	/* set cur_eqe_ba [27:12] */
5839 	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5840 		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
5841 
5842 	/* set cur_eqe_ba [59:28] */
5843 	roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5844 		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
5845 
5846 	/* set cur_eqe_ba [63:60] */
5847 	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5848 		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
5849 
5850 	/* set eq consumer idx */
5851 	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
5852 		       HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
5853 
5854 	/* set nex_eqe_ba[43:12] */
5855 	roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5856 		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
5857 
5858 	/* set nex_eqe_ba[63:44] */
5859 	roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5860 		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
5861 }
5862 
5863 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5864 				  struct hns_roce_eq *eq)
5865 {
5866 	struct device *dev = hr_dev->dev;
5867 	int eq_alloc_done = 0;
5868 	int eq_buf_cnt = 0;
5869 	int eqe_alloc;
5870 	u32 buf_chk_sz;
5871 	u32 bt_chk_sz;
5872 	u32 mhop_num;
5873 	u64 size;
5874 	u64 idx;
5875 	int ba_num;
5876 	int bt_num;
5877 	int record_i;
5878 	int record_j;
5879 	int i = 0;
5880 	int j = 0;
5881 
5882 	mhop_num = hr_dev->caps.eqe_hop_num;
5883 	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5884 	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5885 
5886 	ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
5887 			      buf_chk_sz);
5888 	bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
5889 
5890 	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5891 		if (eq->entries > buf_chk_sz / eq->eqe_size) {
5892 			dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
5893 				eq->entries);
5894 			return -EINVAL;
5895 		}
5896 		eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
5897 					       &(eq->l0_dma), GFP_KERNEL);
5898 		if (!eq->bt_l0)
5899 			return -ENOMEM;
5900 
5901 		eq->cur_eqe_ba = eq->l0_dma;
5902 		eq->nxt_eqe_ba = 0;
5903 
5904 		return 0;
5905 	}
5906 
5907 	eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
5908 	if (!eq->buf_dma)
5909 		return -ENOMEM;
5910 	eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
5911 	if (!eq->buf)
5912 		goto err_kcalloc_buf;
5913 
5914 	if (mhop_num == 2) {
5915 		eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
5916 		if (!eq->l1_dma)
5917 			goto err_kcalloc_l1_dma;
5918 
5919 		eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
5920 		if (!eq->bt_l1)
5921 			goto err_kcalloc_bt_l1;
5922 	}
5923 
5924 	/* alloc L0 BT */
5925 	eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
5926 	if (!eq->bt_l0)
5927 		goto err_dma_alloc_l0;
5928 
5929 	if (mhop_num == 1) {
5930 		if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
5931 			dev_err(dev, "ba_num %d is too large for 1 hop\n",
5932 				ba_num);
5933 
5934 		/* alloc buf */
5935 		for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5936 			if (eq_buf_cnt + 1 < ba_num) {
5937 				size = buf_chk_sz;
5938 			} else {
5939 				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5940 				size = (eq->entries - eqe_alloc) * eq->eqe_size;
5941 			}
5942 			eq->buf[i] = dma_alloc_coherent(dev, size,
5943 							&(eq->buf_dma[i]),
5944 							GFP_KERNEL);
5945 			if (!eq->buf[i])
5946 				goto err_dma_alloc_buf;
5947 
5948 			*(eq->bt_l0 + i) = eq->buf_dma[i];
5949 
5950 			eq_buf_cnt++;
5951 			if (eq_buf_cnt >= ba_num)
5952 				break;
5953 		}
5954 		eq->cur_eqe_ba = eq->buf_dma[0];
5955 		if (ba_num > 1)
5956 			eq->nxt_eqe_ba = eq->buf_dma[1];
5957 
5958 	} else if (mhop_num == 2) {
5959 		/* alloc L1 BT and buf */
5960 		for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5961 			eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
5962 							  &(eq->l1_dma[i]),
5963 							  GFP_KERNEL);
5964 			if (!eq->bt_l1[i])
5965 				goto err_dma_alloc_l1;
5966 			*(eq->bt_l0 + i) = eq->l1_dma[i];
5967 
5968 			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5969 				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5970 				if (eq_buf_cnt + 1 < ba_num) {
5971 					size = buf_chk_sz;
5972 				} else {
5973 					eqe_alloc = (buf_chk_sz / eq->eqe_size)
5974 						    * idx;
5975 					size = (eq->entries - eqe_alloc)
5976 						* eq->eqe_size;
5977 				}
5978 				eq->buf[idx] = dma_alloc_coherent(dev, size,
5979 								  &(eq->buf_dma[idx]),
5980 								  GFP_KERNEL);
5981 				if (!eq->buf[idx])
5982 					goto err_dma_alloc_buf;
5983 
5984 				*(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5985 
5986 				eq_buf_cnt++;
5987 				if (eq_buf_cnt >= ba_num) {
5988 					eq_alloc_done = 1;
5989 					break;
5990 				}
5991 			}
5992 
5993 			if (eq_alloc_done)
5994 				break;
5995 		}
5996 		eq->cur_eqe_ba = eq->buf_dma[0];
5997 		if (ba_num > 1)
5998 			eq->nxt_eqe_ba = eq->buf_dma[1];
5999 	}
6000 
6001 	eq->l0_last_num = i + 1;
6002 	if (mhop_num == 2)
6003 		eq->l1_last_num = j + 1;
6004 
6005 	return 0;
6006 
6007 err_dma_alloc_l1:
6008 	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
6009 	eq->bt_l0 = NULL;
6010 	eq->l0_dma = 0;
6011 	for (i -= 1; i >= 0; i--) {
6012 		dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
6013 				  eq->l1_dma[i]);
6014 
6015 		for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
6016 			idx = i * bt_chk_sz / BA_BYTE_LEN + j;
6017 			dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
6018 					  eq->buf_dma[idx]);
6019 		}
6020 	}
6021 	goto err_dma_alloc_l0;
6022 
6023 err_dma_alloc_buf:
6024 	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
6025 	eq->bt_l0 = NULL;
6026 	eq->l0_dma = 0;
6027 
6028 	if (mhop_num == 1)
6029 		for (i -= 1; i >= 0; i--)
6030 			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
6031 					  eq->buf_dma[i]);
6032 	else if (mhop_num == 2) {
6033 		record_i = i;
6034 		record_j = j;
6035 		for (; i >= 0; i--) {
6036 			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
6037 					  eq->l1_dma[i]);
6038 
6039 			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
6040 				if (i == record_i && j >= record_j)
6041 					break;
6042 
6043 				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
6044 				dma_free_coherent(dev, buf_chk_sz,
6045 						  eq->buf[idx],
6046 						  eq->buf_dma[idx]);
6047 			}
6048 		}
6049 	}
6050 
6051 err_dma_alloc_l0:
6052 	kfree(eq->bt_l1);
6053 	eq->bt_l1 = NULL;
6054 
6055 err_kcalloc_bt_l1:
6056 	kfree(eq->l1_dma);
6057 	eq->l1_dma = NULL;
6058 
6059 err_kcalloc_l1_dma:
6060 	kfree(eq->buf);
6061 	eq->buf = NULL;
6062 
6063 err_kcalloc_buf:
6064 	kfree(eq->buf_dma);
6065 	eq->buf_dma = NULL;
6066 
6067 	return -ENOMEM;
6068 }
6069 
6070 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
6071 				 struct hns_roce_eq *eq,
6072 				 unsigned int eq_cmd)
6073 {
6074 	struct device *dev = hr_dev->dev;
6075 	struct hns_roce_cmd_mailbox *mailbox;
6076 	u32 buf_chk_sz = 0;
6077 	int ret;
6078 
6079 	/* Allocate mailbox memory */
6080 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6081 	if (IS_ERR(mailbox))
6082 		return PTR_ERR(mailbox);
6083 
6084 	if (!hr_dev->caps.eqe_hop_num) {
6085 		buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
6086 
6087 		eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
6088 				       GFP_KERNEL);
6089 		if (!eq->buf_list) {
6090 			ret = -ENOMEM;
6091 			goto free_cmd_mbox;
6092 		}
6093 
6094 		eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
6095 						       &(eq->buf_list->map),
6096 						       GFP_KERNEL);
6097 		if (!eq->buf_list->buf) {
6098 			ret = -ENOMEM;
6099 			goto err_alloc_buf;
6100 		}
6101 
6102 	} else {
6103 		ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
6104 		if (ret) {
6105 			ret = -ENOMEM;
6106 			goto free_cmd_mbox;
6107 		}
6108 	}
6109 
6110 	hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
6111 
6112 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
6113 				eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
6114 	if (ret) {
6115 		dev_err(dev, "[mailbox cmd] create eqc failed.\n");
6116 		goto err_cmd_mbox;
6117 	}
6118 
6119 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6120 
6121 	return 0;
6122 
6123 err_cmd_mbox:
6124 	if (!hr_dev->caps.eqe_hop_num)
6125 		dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
6126 				  eq->buf_list->map);
6127 	else {
6128 		hns_roce_mhop_free_eq(hr_dev, eq);
6129 		goto free_cmd_mbox;
6130 	}
6131 
6132 err_alloc_buf:
6133 	kfree(eq->buf_list);
6134 
6135 free_cmd_mbox:
6136 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6137 
6138 	return ret;
6139 }
6140 
6141 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
6142 				  int comp_num, int aeq_num, int other_num)
6143 {
6144 	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6145 	int i, j;
6146 	int ret;
6147 
6148 	for (i = 0; i < irq_num; i++) {
6149 		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
6150 					       GFP_KERNEL);
6151 		if (!hr_dev->irq_names[i]) {
6152 			ret = -ENOMEM;
6153 			goto err_kzalloc_failed;
6154 		}
6155 	}
6156 
6157 	/* irq contains: abnormal + AEQ + CEQ */
6158 	for (j = 0; j < other_num; j++)
6159 		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6160 			 "hns-abn-%d", j);
6161 
6162 	for (j = other_num; j < (other_num + aeq_num); j++)
6163 		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6164 			 "hns-aeq-%d", j - other_num);
6165 
6166 	for (j = (other_num + aeq_num); j < irq_num; j++)
6167 		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6168 			 "hns-ceq-%d", j - other_num - aeq_num);
6169 
6170 	for (j = 0; j < irq_num; j++) {
6171 		if (j < other_num)
6172 			ret = request_irq(hr_dev->irq[j],
6173 					  hns_roce_v2_msix_interrupt_abn,
6174 					  0, hr_dev->irq_names[j], hr_dev);
6175 
6176 		else if (j < (other_num + comp_num))
6177 			ret = request_irq(eq_table->eq[j - other_num].irq,
6178 					  hns_roce_v2_msix_interrupt_eq,
6179 					  0, hr_dev->irq_names[j + aeq_num],
6180 					  &eq_table->eq[j - other_num]);
6181 		else
6182 			ret = request_irq(eq_table->eq[j - other_num].irq,
6183 					  hns_roce_v2_msix_interrupt_eq,
6184 					  0, hr_dev->irq_names[j - comp_num],
6185 					  &eq_table->eq[j - other_num]);
6186 		if (ret) {
6187 			dev_err(hr_dev->dev, "Request irq error!\n");
6188 			goto err_request_failed;
6189 		}
6190 	}
6191 
6192 	return 0;
6193 
6194 err_request_failed:
6195 	for (j -= 1; j >= 0; j--)
6196 		if (j < other_num)
6197 			free_irq(hr_dev->irq[j], hr_dev);
6198 		else
6199 			free_irq(eq_table->eq[j - other_num].irq,
6200 				 &eq_table->eq[j - other_num]);
6201 
6202 err_kzalloc_failed:
6203 	for (i -= 1; i >= 0; i--)
6204 		kfree(hr_dev->irq_names[i]);
6205 
6206 	return ret;
6207 }
6208 
6209 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
6210 {
6211 	int irq_num;
6212 	int eq_num;
6213 	int i;
6214 
6215 	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6216 	irq_num = eq_num + hr_dev->caps.num_other_vectors;
6217 
6218 	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
6219 		free_irq(hr_dev->irq[i], hr_dev);
6220 
6221 	for (i = 0; i < eq_num; i++)
6222 		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
6223 
6224 	for (i = 0; i < irq_num; i++)
6225 		kfree(hr_dev->irq_names[i]);
6226 }
6227 
6228 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
6229 {
6230 	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6231 	struct device *dev = hr_dev->dev;
6232 	struct hns_roce_eq *eq;
6233 	unsigned int eq_cmd;
6234 	int irq_num;
6235 	int eq_num;
6236 	int other_num;
6237 	int comp_num;
6238 	int aeq_num;
6239 	int i;
6240 	int ret;
6241 
6242 	other_num = hr_dev->caps.num_other_vectors;
6243 	comp_num = hr_dev->caps.num_comp_vectors;
6244 	aeq_num = hr_dev->caps.num_aeq_vectors;
6245 
6246 	eq_num = comp_num + aeq_num;
6247 	irq_num = eq_num + other_num;
6248 
6249 	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
6250 	if (!eq_table->eq)
6251 		return -ENOMEM;
6252 
6253 	/* create eq */
6254 	for (i = 0; i < eq_num; i++) {
6255 		eq = &eq_table->eq[i];
6256 		eq->hr_dev = hr_dev;
6257 		eq->eqn = i;
6258 		if (i < comp_num) {
6259 			/* CEQ */
6260 			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
6261 			eq->type_flag = HNS_ROCE_CEQ;
6262 			eq->entries = hr_dev->caps.ceqe_depth;
6263 			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
6264 			eq->irq = hr_dev->irq[i + other_num + aeq_num];
6265 			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
6266 			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
6267 		} else {
6268 			/* AEQ */
6269 			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
6270 			eq->type_flag = HNS_ROCE_AEQ;
6271 			eq->entries = hr_dev->caps.aeqe_depth;
6272 			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
6273 			eq->irq = hr_dev->irq[i - comp_num + other_num];
6274 			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
6275 			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
6276 		}
6277 
6278 		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
6279 		if (ret) {
6280 			dev_err(dev, "eq create failed.\n");
6281 			goto err_create_eq_fail;
6282 		}
6283 	}
6284 
6285 	/* enable irq */
6286 	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
6287 
6288 	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
6289 				     aeq_num, other_num);
6290 	if (ret) {
6291 		dev_err(dev, "Request irq failed.\n");
6292 		goto err_request_irq_fail;
6293 	}
6294 
6295 	hr_dev->irq_workq =
6296 		create_singlethread_workqueue("hns_roce_irq_workqueue");
6297 	if (!hr_dev->irq_workq) {
6298 		dev_err(dev, "Create irq workqueue failed!\n");
6299 		ret = -ENOMEM;
6300 		goto err_create_wq_fail;
6301 	}
6302 
6303 	return 0;
6304 
6305 err_create_wq_fail:
6306 	__hns_roce_free_irq(hr_dev);
6307 
6308 err_request_irq_fail:
6309 	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6310 
6311 err_create_eq_fail:
6312 	for (i -= 1; i >= 0; i--)
6313 		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
6314 	kfree(eq_table->eq);
6315 
6316 	return ret;
6317 }
6318 
6319 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6320 {
6321 	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6322 	int eq_num;
6323 	int i;
6324 
6325 	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6326 
6327 	/* Disable irq */
6328 	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6329 
6330 	__hns_roce_free_irq(hr_dev);
6331 
6332 	for (i = 0; i < eq_num; i++) {
6333 		hns_roce_v2_destroy_eqc(hr_dev, i);
6334 
6335 		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
6336 	}
6337 
6338 	kfree(eq_table->eq);
6339 
6340 	flush_workqueue(hr_dev->irq_workq);
6341 	destroy_workqueue(hr_dev->irq_workq);
6342 }
6343 
6344 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
6345 				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
6346 				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
6347 				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
6348 				   dma_addr_t dma_handle_idx)
6349 {
6350 	struct hns_roce_srq_context *srq_context;
6351 
6352 	srq_context = mb_buf;
6353 	memset(srq_context, 0, sizeof(*srq_context));
6354 
6355 	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
6356 		       SRQC_BYTE_4_SRQ_ST_S, 1);
6357 
6358 	roce_set_field(srq_context->byte_4_srqn_srqst,
6359 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
6360 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
6361 		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6362 		       hr_dev->caps.srqwqe_hop_num));
6363 	roce_set_field(srq_context->byte_4_srqn_srqst,
6364 		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
6365 		       ilog2(srq->wqe_cnt));
6366 
6367 	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
6368 		       SRQC_BYTE_4_SRQN_S, srq->srqn);
6369 
6370 	roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6371 		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6372 
6373 	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
6374 		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
6375 
6376 	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
6377 
6378 	roce_set_field(srq_context->byte_24_wqe_bt_ba,
6379 		       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
6380 		       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
6381 		       dma_handle_wqe >> 35);
6382 
6383 	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
6384 		       SRQC_BYTE_28_PD_S, pdn);
6385 	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
6386 		       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
6387 		       fls(srq->max_gs - 1));
6388 
6389 	srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
6390 	roce_set_field(srq_context->rsv_idx_bt_ba,
6391 		       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
6392 		       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
6393 		       dma_handle_idx >> 35);
6394 
6395 	srq_context->idx_cur_blk_addr =
6396 		cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
6397 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6398 		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
6399 		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
6400 		       mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
6401 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6402 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
6403 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
6404 		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6405 		       hr_dev->caps.idx_hop_num);
6406 
6407 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6408 		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
6409 		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
6410 		       hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
6411 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6412 		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
6413 		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
6414 		       hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
6415 
6416 	srq_context->idx_nxt_blk_addr =
6417 		cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
6418 	roce_set_field(srq_context->rsv_idxnxtblkaddr,
6419 		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
6420 		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
6421 		       mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
6422 	roce_set_field(srq_context->byte_56_xrc_cqn,
6423 		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
6424 		       cqn);
6425 	roce_set_field(srq_context->byte_56_xrc_cqn,
6426 		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
6427 		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
6428 		       hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
6429 	roce_set_field(srq_context->byte_56_xrc_cqn,
6430 		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
6431 		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
6432 		       hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
6433 
6434 	roce_set_bit(srq_context->db_record_addr_record_en,
6435 		     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
6436 }
6437 
6438 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
6439 				  struct ib_srq_attr *srq_attr,
6440 				  enum ib_srq_attr_mask srq_attr_mask,
6441 				  struct ib_udata *udata)
6442 {
6443 	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6444 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6445 	struct hns_roce_srq_context *srq_context;
6446 	struct hns_roce_srq_context *srqc_mask;
6447 	struct hns_roce_cmd_mailbox *mailbox;
6448 	int ret;
6449 
6450 	if (srq_attr_mask & IB_SRQ_LIMIT) {
6451 		if (srq_attr->srq_limit >= srq->wqe_cnt)
6452 			return -EINVAL;
6453 
6454 		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6455 		if (IS_ERR(mailbox))
6456 			return PTR_ERR(mailbox);
6457 
6458 		srq_context = mailbox->buf;
6459 		srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
6460 
6461 		memset(srqc_mask, 0xff, sizeof(*srqc_mask));
6462 
6463 		roce_set_field(srq_context->byte_8_limit_wl,
6464 			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6465 			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
6466 		roce_set_field(srqc_mask->byte_8_limit_wl,
6467 			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6468 			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6469 
6470 		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
6471 					HNS_ROCE_CMD_MODIFY_SRQC,
6472 					HNS_ROCE_CMD_TIMEOUT_MSECS);
6473 		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6474 		if (ret) {
6475 			dev_err(hr_dev->dev,
6476 				"MODIFY SRQ Failed to cmd mailbox.\n");
6477 			return ret;
6478 		}
6479 	}
6480 
6481 	return 0;
6482 }
6483 
6484 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
6485 {
6486 	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6487 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6488 	struct hns_roce_srq_context *srq_context;
6489 	struct hns_roce_cmd_mailbox *mailbox;
6490 	int limit_wl;
6491 	int ret;
6492 
6493 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6494 	if (IS_ERR(mailbox))
6495 		return PTR_ERR(mailbox);
6496 
6497 	srq_context = mailbox->buf;
6498 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
6499 				HNS_ROCE_CMD_QUERY_SRQC,
6500 				HNS_ROCE_CMD_TIMEOUT_MSECS);
6501 	if (ret) {
6502 		dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
6503 		goto out;
6504 	}
6505 
6506 	limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
6507 				  SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6508 				  SRQC_BYTE_8_SRQ_LIMIT_WL_S);
6509 
6510 	attr->srq_limit = limit_wl;
6511 	attr->max_wr    = srq->wqe_cnt - 1;
6512 	attr->max_sge   = srq->max_gs;
6513 
6514 	memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
6515 
6516 out:
6517 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6518 	return ret;
6519 }
6520 
6521 static int find_empty_entry(struct hns_roce_idx_que *idx_que,
6522 			    unsigned long size)
6523 {
6524 	int wqe_idx;
6525 
6526 	if (unlikely(bitmap_full(idx_que->bitmap, size)))
6527 		return -ENOSPC;
6528 
6529 	wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
6530 
6531 	bitmap_set(idx_que->bitmap, wqe_idx, 1);
6532 
6533 	return wqe_idx;
6534 }
6535 
6536 static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
6537 			   int cur_idx, int wqe_idx)
6538 {
6539 	unsigned int *addr;
6540 
6541 	addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
6542 						   cur_idx * idx_que->entry_sz);
6543 	*addr = wqe_idx;
6544 }
6545 
6546 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
6547 				     const struct ib_recv_wr *wr,
6548 				     const struct ib_recv_wr **bad_wr)
6549 {
6550 	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6551 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6552 	struct hns_roce_v2_wqe_data_seg *dseg;
6553 	struct hns_roce_v2_db srq_db;
6554 	unsigned long flags;
6555 	int ret = 0;
6556 	int wqe_idx;
6557 	void *wqe;
6558 	int nreq;
6559 	int ind;
6560 	int i;
6561 
6562 	spin_lock_irqsave(&srq->lock, flags);
6563 
6564 	ind = srq->head & (srq->wqe_cnt - 1);
6565 
6566 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
6567 		if (unlikely(wr->num_sge > srq->max_gs)) {
6568 			ret = -EINVAL;
6569 			*bad_wr = wr;
6570 			break;
6571 		}
6572 
6573 		if (unlikely(srq->head == srq->tail)) {
6574 			ret = -ENOMEM;
6575 			*bad_wr = wr;
6576 			break;
6577 		}
6578 
6579 		wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
6580 		if (wqe_idx < 0) {
6581 			ret = -ENOMEM;
6582 			*bad_wr = wr;
6583 			break;
6584 		}
6585 
6586 		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
6587 		wqe = get_srq_wqe(srq, wqe_idx);
6588 		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
6589 
6590 		for (i = 0; i < wr->num_sge; ++i) {
6591 			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
6592 			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
6593 			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
6594 		}
6595 
6596 		if (i < srq->max_gs) {
6597 			dseg[i].len = 0;
6598 			dseg[i].lkey = cpu_to_le32(0x100);
6599 			dseg[i].addr = 0;
6600 		}
6601 
6602 		srq->wrid[wqe_idx] = wr->wr_id;
6603 		ind = (ind + 1) & (srq->wqe_cnt - 1);
6604 	}
6605 
6606 	if (likely(nreq)) {
6607 		srq->head += nreq;
6608 
6609 		/*
6610 		 * Make sure that descriptors are written before
6611 		 * doorbell record.
6612 		 */
6613 		wmb();
6614 
6615 		srq_db.byte_4 =
6616 			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
6617 				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
6618 		srq_db.parameter = cpu_to_le32(srq->head);
6619 
6620 		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
6621 
6622 	}
6623 
6624 	spin_unlock_irqrestore(&srq->lock, flags);
6625 
6626 	return ret;
6627 }
6628 
6629 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6630 	.query_cqc_info = hns_roce_v2_query_cqc_info,
6631 };
6632 
6633 static const struct ib_device_ops hns_roce_v2_dev_ops = {
6634 	.destroy_qp = hns_roce_v2_destroy_qp,
6635 	.modify_cq = hns_roce_v2_modify_cq,
6636 	.poll_cq = hns_roce_v2_poll_cq,
6637 	.post_recv = hns_roce_v2_post_recv,
6638 	.post_send = hns_roce_v2_post_send,
6639 	.query_qp = hns_roce_v2_query_qp,
6640 	.req_notify_cq = hns_roce_v2_req_notify_cq,
6641 };
6642 
6643 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6644 	.modify_srq = hns_roce_v2_modify_srq,
6645 	.post_srq_recv = hns_roce_v2_post_srq_recv,
6646 	.query_srq = hns_roce_v2_query_srq,
6647 };
6648 
6649 static const struct hns_roce_hw hns_roce_hw_v2 = {
6650 	.cmq_init = hns_roce_v2_cmq_init,
6651 	.cmq_exit = hns_roce_v2_cmq_exit,
6652 	.hw_profile = hns_roce_v2_profile,
6653 	.hw_init = hns_roce_v2_init,
6654 	.hw_exit = hns_roce_v2_exit,
6655 	.post_mbox = hns_roce_v2_post_mbox,
6656 	.chk_mbox = hns_roce_v2_chk_mbox,
6657 	.rst_prc_mbox = hns_roce_v2_rst_process_cmd,
6658 	.set_gid = hns_roce_v2_set_gid,
6659 	.set_mac = hns_roce_v2_set_mac,
6660 	.write_mtpt = hns_roce_v2_write_mtpt,
6661 	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6662 	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6663 	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6664 	.write_cqc = hns_roce_v2_write_cqc,
6665 	.set_hem = hns_roce_v2_set_hem,
6666 	.clear_hem = hns_roce_v2_clear_hem,
6667 	.modify_qp = hns_roce_v2_modify_qp,
6668 	.query_qp = hns_roce_v2_query_qp,
6669 	.destroy_qp = hns_roce_v2_destroy_qp,
6670 	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6671 	.modify_cq = hns_roce_v2_modify_cq,
6672 	.post_send = hns_roce_v2_post_send,
6673 	.post_recv = hns_roce_v2_post_recv,
6674 	.req_notify_cq = hns_roce_v2_req_notify_cq,
6675 	.poll_cq = hns_roce_v2_poll_cq,
6676 	.init_eq = hns_roce_v2_init_eq_table,
6677 	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
6678 	.write_srqc = hns_roce_v2_write_srqc,
6679 	.modify_srq = hns_roce_v2_modify_srq,
6680 	.query_srq = hns_roce_v2_query_srq,
6681 	.post_srq_recv = hns_roce_v2_post_srq_recv,
6682 	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6683 	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6684 };
6685 
6686 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6687 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6688 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6689 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6690 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6691 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6692 	/* required last entry */
6693 	{0, }
6694 };
6695 
6696 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6697 
6698 static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6699 				  struct hnae3_handle *handle)
6700 {
6701 	struct hns_roce_v2_priv *priv = hr_dev->priv;
6702 	int i;
6703 
6704 	hr_dev->pci_dev = handle->pdev;
6705 	hr_dev->dev = &handle->pdev->dev;
6706 	hr_dev->hw = &hns_roce_hw_v2;
6707 	hr_dev->dfx = &hns_roce_dfx_hw_v2;
6708 	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6709 	hr_dev->odb_offset = hr_dev->sdb_offset;
6710 
6711 	/* Get info from NIC driver. */
6712 	hr_dev->reg_base = handle->rinfo.roce_io_base;
6713 	hr_dev->caps.num_ports = 1;
6714 	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6715 	hr_dev->iboe.phy_port[0] = 0;
6716 
6717 	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6718 			    hr_dev->iboe.netdevs[0]->dev_addr);
6719 
6720 	for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
6721 		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6722 						i + handle->rinfo.base_vector);
6723 
6724 	/* cmd issue mode: 0 is poll, 1 is event */
6725 	hr_dev->cmd_mod = 1;
6726 	hr_dev->loop_idc = 0;
6727 
6728 	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6729 	priv->handle = handle;
6730 }
6731 
6732 static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6733 {
6734 	struct hns_roce_dev *hr_dev;
6735 	int ret;
6736 
6737 	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
6738 	if (!hr_dev)
6739 		return -ENOMEM;
6740 
6741 	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6742 	if (!hr_dev->priv) {
6743 		ret = -ENOMEM;
6744 		goto error_failed_kzalloc;
6745 	}
6746 
6747 	hns_roce_hw_v2_get_cfg(hr_dev, handle);
6748 
6749 	ret = hns_roce_init(hr_dev);
6750 	if (ret) {
6751 		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6752 		goto error_failed_get_cfg;
6753 	}
6754 
6755 	handle->priv = hr_dev;
6756 
6757 	return 0;
6758 
6759 error_failed_get_cfg:
6760 	kfree(hr_dev->priv);
6761 
6762 error_failed_kzalloc:
6763 	ib_dealloc_device(&hr_dev->ib_dev);
6764 
6765 	return ret;
6766 }
6767 
6768 static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6769 					   bool reset)
6770 {
6771 	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
6772 
6773 	if (!hr_dev)
6774 		return;
6775 
6776 	handle->priv = NULL;
6777 
6778 	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
6779 	hns_roce_handle_device_err(hr_dev);
6780 
6781 	hns_roce_exit(hr_dev);
6782 	kfree(hr_dev->priv);
6783 	ib_dealloc_device(&hr_dev->ib_dev);
6784 }
6785 
6786 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6787 {
6788 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6789 	const struct pci_device_id *id;
6790 	struct device *dev = &handle->pdev->dev;
6791 	int ret;
6792 
6793 	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6794 
6795 	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6796 		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6797 		goto reset_chk_err;
6798 	}
6799 
6800 	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6801 	if (!id)
6802 		return 0;
6803 
6804 	ret = __hns_roce_hw_v2_init_instance(handle);
6805 	if (ret) {
6806 		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6807 		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6808 		if (ops->ae_dev_resetting(handle) ||
6809 		    ops->get_hw_reset_stat(handle))
6810 			goto reset_chk_err;
6811 		else
6812 			return ret;
6813 	}
6814 
6815 	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6816 
6817 
6818 	return 0;
6819 
6820 reset_chk_err:
6821 	dev_err(dev, "Device is busy in resetting state.\n"
6822 		     "please retry later.\n");
6823 
6824 	return -EBUSY;
6825 }
6826 
6827 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6828 					   bool reset)
6829 {
6830 	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6831 		return;
6832 
6833 	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6834 
6835 	__hns_roce_hw_v2_uninit_instance(handle, reset);
6836 
6837 	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6838 }
6839 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6840 {
6841 	struct hns_roce_dev *hr_dev;
6842 
6843 	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6844 		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6845 		return 0;
6846 	}
6847 
6848 	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6849 	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6850 
6851 	hr_dev = (struct hns_roce_dev *)handle->priv;
6852 	if (!hr_dev)
6853 		return 0;
6854 
6855 	hr_dev->is_reset = true;
6856 	hr_dev->active = false;
6857 	hr_dev->dis_db = true;
6858 
6859 	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
6860 
6861 	return 0;
6862 }
6863 
6864 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6865 {
6866 	struct device *dev = &handle->pdev->dev;
6867 	int ret;
6868 
6869 	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6870 			       &handle->rinfo.state)) {
6871 		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6872 		return 0;
6873 	}
6874 
6875 	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6876 
6877 	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
6878 	ret = __hns_roce_hw_v2_init_instance(handle);
6879 	if (ret) {
6880 		/* when reset notify type is HNAE3_INIT_CLIENT In reset notify
6881 		 * callback function, RoCE Engine reinitialize. If RoCE reinit
6882 		 * failed, we should inform NIC driver.
6883 		 */
6884 		handle->priv = NULL;
6885 		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6886 	} else {
6887 		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6888 		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
6889 	}
6890 
6891 	return ret;
6892 }
6893 
6894 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6895 {
6896 	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6897 		return 0;
6898 
6899 	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6900 	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6901 	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
6902 	__hns_roce_hw_v2_uninit_instance(handle, false);
6903 
6904 	return 0;
6905 }
6906 
6907 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6908 				       enum hnae3_reset_notify_type type)
6909 {
6910 	int ret = 0;
6911 
6912 	switch (type) {
6913 	case HNAE3_DOWN_CLIENT:
6914 		ret = hns_roce_hw_v2_reset_notify_down(handle);
6915 		break;
6916 	case HNAE3_INIT_CLIENT:
6917 		ret = hns_roce_hw_v2_reset_notify_init(handle);
6918 		break;
6919 	case HNAE3_UNINIT_CLIENT:
6920 		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6921 		break;
6922 	default:
6923 		break;
6924 	}
6925 
6926 	return ret;
6927 }
6928 
6929 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6930 	.init_instance = hns_roce_hw_v2_init_instance,
6931 	.uninit_instance = hns_roce_hw_v2_uninit_instance,
6932 	.reset_notify = hns_roce_hw_v2_reset_notify,
6933 };
6934 
6935 static struct hnae3_client hns_roce_hw_v2_client = {
6936 	.name = "hns_roce_hw_v2",
6937 	.type = HNAE3_CLIENT_ROCE,
6938 	.ops = &hns_roce_hw_v2_ops,
6939 };
6940 
6941 static int __init hns_roce_hw_v2_init(void)
6942 {
6943 	return hnae3_register_client(&hns_roce_hw_v2_client);
6944 }
6945 
6946 static void __exit hns_roce_hw_v2_exit(void)
6947 {
6948 	hnae3_unregister_client(&hns_roce_hw_v2_client);
6949 }
6950 
6951 module_init(hns_roce_hw_v2_init);
6952 module_exit(hns_roce_hw_v2_exit);
6953 
6954 MODULE_LICENSE("Dual BSD/GPL");
6955 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6956 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6957 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6958 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
6959