/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

enum {
	CMD_RST_PRC_OTHERS,
	CMD_RST_PRC_SUCCESS,
	CMD_RST_PRC_EBUSY,
};

enum ecc_resource_type {
	ECC_RESOURCE_QPC,
	ECC_RESOURCE_CQC,
	ECC_RESOURCE_MPT,
	ECC_RESOURCE_SRQC,
	ECC_RESOURCE_GMV,
	ECC_RESOURCE_QPC_TIMER,
	ECC_RESOURCE_CQC_TIMER,
	ECC_RESOURCE_SCCC,
	ECC_RESOURCE_COUNT,
};

static const struct {
	const char *name;
	u8 read_bt0_op;
	u8 write_bt0_op;
} fmea_ram_res[] = {
	{ "ECC_RESOURCE_QPC",
	  HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
	{ "ECC_RESOURCE_CQC",
	  HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
	{ "ECC_RESOURCE_MPT",
	  HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
	{ "ECC_RESOURCE_SRQC",
	  HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
	/* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
	{ "ECC_RESOURCE_GMV",
	  0, 0 },
	{ "ECC_RESOURCE_QPC_TIMER",
	  HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
	{ "ECC_RESOURCE_CQC_TIMER",
	  HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
	{ "ECC_RESOURCE_SCCC",
	  HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
};

static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
				   struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The hns hardware opcode values start from 0. To distinguish an
 * initialized map entry from an uninitialized one, the mapping stores
 * the real value plus 1, so an entry is known to be valid when the
 * mapped value is greater than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
	[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
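
/*
 * For example, HR_OPC_MAP(SEND, SEND) expands to
 *   [IB_WR_SEND] = 1 + HNS_ROCE_V2_WQE_OP_SEND
 * so a zero entry in hns_roce_op_code[] below simply means "no mapping
 * defined for this IB opcode".
 */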
static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND, SEND),
	HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ, RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR, FAST_REG_PMR),
};

static u32 to_hr_opcode(u32 ib_opcode)
{
	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
					     HNS_ROCE_V2_WQE_OP_MASK;
}

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_wqe_frmr_seg *fseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
	u64 pbl_ba;

	/* use ib_access_flags */
	hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
	hr_reg_write_bool(fseg, FRMR_ATOMIC,
			  wr->access & IB_ACCESS_REMOTE_ATOMIC);
	hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
	hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
	hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);

	/* The FRMR WQE reuses the msg_len and inv_key fields of the RC WQE
	 * header to carry the low and high halves of the PBL base address,
	 * which may be confusing when reading the structure definitions.
	 */
	pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
	rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
	rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
	hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
	hr_reg_clear(fseg, FRMR_BLK_MODE);
}

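/*
 * Layout of an atomic RC WQE, as implied by the pointer arithmetic below:
 * the fixed WQE header is followed by a single data segment describing the
 * local buffer that receives the original remote value, then by the atomic
 * segment carrying the swap/add and compare operands.
 */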
static void set_atomic_seg(const struct ib_send_wr *wr,
			   struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			   unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_wqe_atomic_seg *aseg =
		(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

	set_data_seg_v2(dseg, wr->sg_list);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
	} else {
		aseg->fetchadd_swap_data =
			cpu_to_le64(atomic_wr(wr)->compare_add);
		aseg->cmp_data = 0;
	}

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
				 const struct ib_send_wr *wr,
				 unsigned int *sge_idx, u32 msg_len)
{
	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
	unsigned int left_len_in_pg;
	unsigned int idx = *sge_idx;
	unsigned int i = 0;
	unsigned int len;
	void *addr;
	void *dseg;

	if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
		ibdev_err(ibdev,
			  "not enough extended sge space for inline data.\n");
		return -EINVAL;
	}

	dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
	left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
	len = wr->sg_list[0].length;
	addr = (void *)(unsigned long)(wr->sg_list[0].addr);

	/* When copying data to the extended sge space, the length left in
	 * the current page may not be enough for the user's sge, so the
	 * data may need to be split into several parts: one in the first
	 * page and the others in the subsequent pages.
	 */
	while (1) {
		if (len <= left_len_in_pg) {
			memcpy(dseg, addr, len);

			/* Advance past the bytes just copied before loading
			 * the next sge, so the inline payload stays
			 * contiguous.
			 */
			dseg += len;
			idx += len / HNS_ROCE_SGE_SIZE;

			i++;
			if (i >= wr->num_sge)
				break;

			left_len_in_pg -= len;
			len = wr->sg_list[i].length;
			addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		} else {
			memcpy(dseg, addr, left_len_in_pg);

			len -= left_len_in_pg;
			addr += left_len_in_pg;
			idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
			dseg = hns_roce_get_extend_sge(qp,
						idx & (qp->sge.sge_cnt - 1));
			left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
		}
	}

	*sge_idx = idx;

	return 0;
}

static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
			   unsigned int *sge_ind, unsigned int cnt)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	unsigned int idx = *sge_ind;

	while (cnt > 0) {
		dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
		if (likely(sge->length)) {
			set_data_seg_v2(dseg, sge);
			idx++;
			cnt--;
		}
		sge++;
	}

	*sge_ind = idx;
}

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	int mtu = ib_mtu_enum_to_int(qp->path_mtu);

	if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
			  len, qp->max_inline_data, mtu);
		return false;
	}

	return true;
}

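/*
 * Two inline modes are handled here: a payload of up to
 * HNS_ROCE_V2_MAX_RC_INL_INN_SZ bytes is copied directly after the WQE
 * header (INL_TYPE cleared), while a larger payload goes into the
 * extended sge space instead (INL_TYPE set).
 */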
static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
		      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
		      unsigned int *sge_idx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int curr_idx = *sge_idx;
	void *dseg = rc_sq_wqe;
	unsigned int i;
	int ret;

	if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
		ibdev_err(ibdev, "invalid inline parameters!\n");
		return -EINVAL;
	}

	if (!check_inl_data_len(qp, msg_len))
		return -EINVAL;

	dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
		hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dseg, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			dseg += wr->sg_list[i].length;
		}
	} else {
		hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);

		ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
		if (ret)
			return ret;

		hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
	}

	*sge_idx = curr_idx;

	return 0;
}

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     unsigned int *sge_ind,
			     unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
		     (*sge_ind) & (qp->sge.sge_cnt - 1));

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
		     !!(wr->send_flags & IB_SEND_INLINE));
	if (wr->send_flags & IB_SEND_INLINE)
		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

	if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
			}
		}
	} else {
		for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
				j++;
			}
		}

		set_extend_sge(qp, wr->sg_list + i, sge_ind,
			       valid_num_sge - HNS_ROCE_SGE_IN_WQE);
	}

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);

	return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;

	if (unlikely(hr_qp->state == IB_QPS_RESET ||
		     hr_qp->state == IB_QPS_INIT ||
		     hr_qp->state == IB_QPS_RTR)) {
		ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
			  hr_qp->state);
		return -EINVAL;
	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
			  hr_dev->state);
		return -EIO;
	}

	return 0;
}

static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
				    unsigned int *sge_len)
{
	unsigned int valid_num = 0;
	unsigned int len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		if (likely(wr->sg_list[i].length)) {
			len += wr->sg_list[i].length;
			valid_num++;
		}
	}

	*sge_len = len;
	return valid_num;
}

static __le32 get_immtdata(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
	default:
		return 0;
	}
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
		return -EINVAL;

	ud_sq_wqe->immtdata = get_immtdata(wr);

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));

	return 0;
}

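/*
 * Fill the address vector fields of a UD WQE. Note that the VLAN fields
 * are only programmed on HIP08 hardware: for HIP09 and later the function
 * returns before reaching them.
 */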
static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
		      struct hns_roce_ah *ah)
{
	struct ib_device *ib_dev = ah->ibah.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);

	if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
		return -EINVAL;

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);

	ud_sq_wqe->sgid_index = ah->av.gid_index;

	memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
	memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);

	return 0;
}

static inline int set_ud_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	ret = set_ud_opcode(ud_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
		     !!(wr->send_flags & IB_SEND_SIGNALED));
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
		     !!(wr->send_flags & IB_SEND_SOLICITED));

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
		     curr_idx & (qp->sge.sge_cnt - 1));

	ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			  qp->qkey : ud_wr(wr)->remote_qkey);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);

	ret = fill_ud_av(ud_sq_wqe, ah);
	if (ret)
		return ret;

	qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;

	set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);

	return 0;
}

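/*
 * Translate the IB opcode and fill the opcode-specific WQE fields:
 * rkey/va for RDMA and atomic operations, inv_key for SEND_WITH_INV,
 * and the FRMR segment for REG_MR (supported on HIP09 and later only).
 */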
static int set_rc_opcode(struct hns_roce_dev *hr_dev,
			 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;
	int ret = 0;

	rc_sq_wqe->immtdata = get_immtdata(wr);

	switch (ib_op) {
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
		break;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
		break;
	case IB_WR_REG_MR:
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			set_frmr_seg(rc_sq_wqe, reg_wr(wr));
		else
			ret = -EOPNOTSUPP;
		break;
	case IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	default:
		ret = -EINVAL;
	}

	if (unlikely(ret))
		return ret;

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));

	return ret;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE,
		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
		if (msg_len != ATOMIC_WR_LEN)
			return -EINVAL;
		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
	} else if (wr->opcode != IB_WR_REG_MR) {
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);
		if (ret)
			return ret;
	}

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);

	return ret;
}

static inline void update_sq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
	} else {
		struct hns_roce_v2_db sq_db = {};

		hr_reg_write(&sq_db, DB_TAG, qp->qpn);
		hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
		hr_reg_write(&sq_db, DB_PI, qp->sq.head);
		hr_reg_write(&sq_db, DB_SL, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
	}
}

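/*
 * With a record doorbell (HNS_ROCE_QP_CAP_RQ_RECORD_DB) the RQ producer
 * index is written to a DMA-visible memory location read by the hardware;
 * otherwise a conventional doorbell register write is used.
 */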
static inline void update_rq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
	} else {
		if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
			*qp->rdb.db_record =
				qp->rq.head & V2_DB_PRODUCER_IDX_M;
		} else {
			struct hns_roce_v2_db rq_db = {};

			hr_reg_write(&rq_db, DB_TAG, qp->qpn);
			hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
			hr_reg_write(&rq_db, DB_PI, qp->rq.head);

			hns_roce_write64(hr_dev, (__le32 *)&rq_db,
					 qp->rq.db_reg);
		}
	}
}

static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
			      u64 __iomem *dest)
{
#define HNS_ROCE_WRITE_TIMES 8
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	int i;

	if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
		for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
			writeq_relaxed(*(val + i), dest + i);
}

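/*
 * Direct WQE: instead of ringing a doorbell and letting the device fetch
 * the WQE from memory, the whole 64-byte WQE is pushed to the device
 * through hns_roce_write512() above.
 */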
static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
		       void *wqe)
{
#define HNS_ROCE_SL_SHIFT 2
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

	/* All kinds of DirectWQE have the same header field layout */
	hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
		     qp->sl >> HNS_ROCE_SL_SHIFT);
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);

	hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}

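/*
 * Post-send flow: take the SQ lock, validate QP and device state, then
 * build one WQE per work request (RC or UD). A single request on a QP
 * with HNS_ROCE_QP_CAP_DIRECT_WQE is pushed via write_dwqe(); otherwise
 * the SQ doorbell is rung once for the whole chain.
 */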
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	unsigned long flags = 0;
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	void *wqe = NULL;
	u32 nreq;
	int ret;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
				  wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* The WQE is built differently depending on the QP type */
		if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		if (unlikely(ret)) {
			*bad_wr = wr;
			goto out;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;

		if (nreq == 1 && !ret &&
		    (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
			write_dwqe(hr_dev, qp, wqe);
		else
			update_sq_db(hr_dev, qp);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

static int check_recv_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
		return -EIO;

	if (hr_qp->state == IB_QPS_RESET)
		return -EINVAL;

	return 0;
}

static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
				 u32 max_sge, bool rsv)
{
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	u32 i, cnt;

	for (i = 0, cnt = 0; i < wr->num_sge; i++) {
		/* Skip zero-length sge */
		if (!wr->sg_list[i].length)
			continue;
		set_data_seg_v2(dseg + cnt, wr->sg_list + i);
		cnt++;
	}

	/* Fill a reserved sge to make hw stop reading remaining segments */
	if (rsv) {
		dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
		dseg[cnt].addr = 0;
		dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
	} else {
		/* Clear remaining segments to make ROCEE ignore sges */
		if (cnt < max_sge)
			memset(dseg + cnt, 0,
			       (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
	}
}

static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
			u32 wqe_idx, u32 max_sge)
{
	void *wqe = NULL;

	wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
	fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 wqe_idx, nreq, max_sge;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	ret = check_recv_valid(hr_dev, hr_qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
						  hr_qp->ibqp.recv_cq))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > max_sge)) {
			ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
				  wr->num_sge, max_sge);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
		fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		update_rq_db(hr_dev, hr_qp);
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
	return hns_roce_buf_offset(idx_que->mtr.kmem,
				   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->idx_que.tail++;

	spin_unlock(&srq->lock);
}

static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	return idx_que->head - idx_que->tail >= srq->wqe_cnt;
}

static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
				const struct ib_recv_wr *wr)
{
	struct ib_device *ib_dev = srq->ibsrq.device;

	if (unlikely(wr->num_sge > max_sge)) {
		ibdev_err(ib_dev,
			  "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
			  wr->num_sge, max_sge);
		return -EINVAL;
	}

	if (unlikely(hns_roce_srqwq_overflow(srq))) {
		ibdev_err(ib_dev,
			  "failed to check srqwq status, srqwq is full.\n");
		return -ENOMEM;
	}

	return 0;
}

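/*
 * SRQ WQE indexes are managed with a bitmap: a free index is claimed in
 * get_srq_wqe_idx() and released again by hns_roce_free_srq_wqe() once
 * the WQE has completed.
 */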
static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	u32 pos;

	pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
	if (unlikely(pos == srq->wqe_cnt))
		return -ENOSPC;

	bitmap_set(idx_que->bitmap, pos, 1);
	*wqe_idx = pos;
	return 0;
}

static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	unsigned int head;
	__le32 *buf;

	head = idx_que->head & (srq->wqe_cnt - 1);

	buf = get_idx_buf(idx_que, head);
	*buf = cpu_to_le32(wqe_idx);

	idx_que->head++;
}

static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
{
	hr_reg_write(db, DB_TAG, srq->srqn);
	hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
	hr_reg_write(db, DB_PI, srq->idx_que.head);
}

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	u32 max_sge;
	u32 wqe_idx;
	void *wqe;
	u32 nreq;

	spin_lock_irqsave(&srq->lock, flags);

	max_sge = srq->max_gs - srq->rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ret = check_post_srq_valid(srq, max_sge, wr);
		if (ret) {
			*bad_wr = wr;
			break;
		}

		ret = get_srq_wqe_idx(srq, &wqe_idx);
		if (unlikely(ret)) {
			*bad_wr = wr;
			break;
		}

		wqe = get_srq_wqe_buf(srq, wqe_idx);
		fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
		fill_wqe_idx(srq, wqe_idx);
		srq->wrid[wqe_idx] = wr->wr_id;
	}

	if (likely(nreq)) {
		update_srq_db(&srq_db, srq);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}

static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has completed once or more, we should stop
	 * sending mailbox/cmq/doorbell operations to the hardware. If we are
	 * currently in .init_instance(), or at the HNAE3_INIT_CLIENT stage
	 * of the soft reset process, we should exit with an error so that
	 * the HNAE3_INIT_CLIENT related process can roll back operations
	 * such as notifying the hardware to free resources, and then exit
	 * with an error to make the NIC driver reschedule the soft reset
	 * process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
#define HW_RESET_TIMEOUT_US 1000000
#define HW_RESET_SLEEP_US 1000

	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long val;
	int ret;

	/* When a hardware reset is detected, we should stop sending
	 * mailbox/cmq/doorbell operations to the hardware. If we are
	 * currently in .init_instance(), or at the HNAE3_INIT_CLIENT stage
	 * of the soft reset process, we should exit with an error so that
	 * the HNAE3_INIT_CLIENT related process can roll back operations
	 * such as notifying the hardware to free resources, and then exit
	 * with an error to make the NIC driver reschedule the soft reset
	 * process once again.
	 */
	hr_dev->dis_db = true;

	ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
				val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
				HW_RESET_TIMEOUT_US, false, handle);
	if (!ret)
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}

static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
				    struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage; /* the current instance stage */
	unsigned long reset_stage; /* the current reset stage */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables from NIC driver are
	 * described as below:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether hardware device is resetting now.
	 * sw_resetting -- Whether NIC's software reset process is running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);

	hw_resetting = ops->get_cmdq_stat(handle);
	if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);

	sw_resetting = ops->ae_dev_resetting(handle);
	if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return CMD_RST_PRC_OTHERS;
}

static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
		return true;

	if (ops->get_hw_reset_stat(handle))
		return true;

	if (ops->ae_dev_resetting(handle))
		return true;

	return false;
}

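/*
 * Returns true when the command queue can be used normally. On a false
 * return, *busy tells the caller whether to back off with -EBUSY (a reset
 * is still in progress) or to treat the command as harmlessly dropped
 * because a reset has already completed.
 */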
static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	u32 status;

	if (hr_dev->is_reset)
		status = CMD_RST_PRC_SUCCESS;
	else
		status = check_aedev_reset_status(hr_dev, priv->handle);

	*busy = (status == CMD_RST_PRC_EBUSY);

	return status == CMD_RST_PRC_OTHERS;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = dma_alloc_coherent(hr_dev->dev, size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_free_coherent(hr_dev->dev,
			  ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			  ring->desc, ring->desc_dma_addr);

	ring->desc_dma_addr = 0;
}

static int init_csq(struct hns_roce_dev *hr_dev,
		    struct hns_roce_v2_cmq_ring *csq)
{
	dma_addr_t dma;
	int ret;

	csq->desc_num = CMD_CSQ_DESC_NUM;
	spin_lock_init(&csq->lock);
	csq->flag = TYPE_CSQ;
	csq->head = 0;

	ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
	if (ret)
		return ret;

	dma = csq->desc_dma_addr;
	roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
	roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
	roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
		   (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);

	/* Make sure to write CI first and then PI */
	roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
	roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);

	return 0;
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int ret;

	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	ret = init_csq(hr_dev, &priv->cmq.csq);
	if (ret)
		dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);

	return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	return tail == priv->cmq.csq.head;
}

static void update_cmdq_status(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;

	if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
	    handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
		hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
}

static int hns_roce_cmd_err_convert_errno(u16 desc_ret)
{
	struct hns_roce_cmd_errcode errcode_table[] = {
		{CMD_EXEC_SUCCESS, 0},
		{CMD_NO_AUTH, -EPERM},
		{CMD_NOT_EXIST, -EOPNOTSUPP},
		{CMD_CRQ_FULL, -EXFULL},
		{CMD_NEXT_ERR, -ENOSR},
		{CMD_NOT_EXEC, -ENOTBLK},
		{CMD_PARA_ERR, -EINVAL},
		{CMD_RESULT_ERR, -ERANGE},
		{CMD_TIMEOUT, -ETIME},
		{CMD_HILINK_ERR, -ENOLINK},
		{CMD_INFO_ILLEGAL, -ENXIO},
		{CMD_INVALID, -EBADR},
	};
	u16 i;

	for (i = 0; i < ARRAY_SIZE(errcode_table); i++)
		if (desc_ret == errcode_table[i].return_status)
			return errcode_table[i].errno;
	return -EIO;
}

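/*
 * Submit a batch of descriptors to the command send queue: copy them into
 * the ring, advance the PI register, then poll the CI register (see
 * hns_roce_cmq_csq_done()) until the hardware has consumed everything or
 * tx_timeout expires.
 */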
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	u32 timeout = 0;
	u16 desc_ret;
	u32 tail;
	int ret;
	int i;

	spin_lock_bh(&csq->lock);

	tail = csq->head;

	for (i = 0; i < num; i++) {
		csq->desc[csq->head++] = desc[i];
		if (csq->head == csq->desc_num)
			csq->head = 0;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);

	do {
		if (hns_roce_cmq_csq_done(hr_dev))
			break;
		udelay(1);
	} while (++timeout < priv->cmq.tx_timeout);

	if (hns_roce_cmq_csq_done(hr_dev)) {
		ret = 0;
		for (i = 0; i < num; i++) {
			/* check the result of hardware write back */
			desc[i] = csq->desc[tail++];
			if (tail == csq->desc_num)
				tail = 0;

			desc_ret = le16_to_cpu(desc[i].retval);
			if (likely(desc_ret == CMD_EXEC_SUCCESS))
				continue;

			dev_err_ratelimited(hr_dev->dev,
					    "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
					    desc->opcode, desc_ret);
			ret = hns_roce_cmd_err_convert_errno(desc_ret);
		}
	} else {
		/* FW/HW reset or incorrect number of desc */
		tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
		dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
			 csq->head, tail);
		csq->head = tail;

		update_cmdq_status(hr_dev);

		ret = -EAGAIN;
	}

	spin_unlock_bh(&csq->lock);

	return ret;
}

static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	bool busy;
	int ret;

	if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
		return -EIO;

	if (!v2_chk_mbox_is_avail(hr_dev, &busy))
		return busy ? -EBUSY : 0;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		if (!v2_chk_mbox_is_avail(hr_dev, &busy))
			return busy ? -EBUSY : 0;
	}

	return ret;
}

static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
			       dma_addr_t base_addr, u8 cmd, unsigned long tag)
{
	struct hns_roce_cmd_mailbox *mbox;
	int ret;

	mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
	hns_roce_free_cmd_mailbox(hr_dev, mbox);
	return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}

static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (!ops->get_hw_reset_stat(handle)) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "func clear success after reset.\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "func clear failed.\n");
}

static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (ops->ae_dev_reset_cnt(handle) !=
		    hr_dev->reset_cnt) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "func clear success after sw reset\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
}

static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
				       int flag)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "func clear success after reset.\n");
		return;
	}

	if (ops->get_hw_reset_stat(handle)) {
		func_clr_hw_resetting_state(hr_dev, handle);
		return;
	}

	if (ops->ae_dev_resetting(handle) &&
	    handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
		func_clr_sw_resetting_state(hr_dev, handle);
		return;
	}

	if (retval && !flag)
		dev_warn(hr_dev->dev,
			 "func clear read failed, ret = %d.\n", retval);

	dev_warn(hr_dev->dev, "func clear failed.\n");
}

static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (check_device_is_in_reset(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;
	resp->rst_funcid_en = cpu_to_le32(vf_id);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (check_device_is_in_reset(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		resp->rst_funcid_en = cpu_to_le32(vf_id);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
			if (vf_id == 0)
				hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
}

static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
	enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *req_a;

	req_a = (struct hns_roce_cmq_req *)desc[0].data;
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
	hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	int ret;
	int i;

	if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
		return;

	for (i = hr_dev->func_num - 1; i >= 0; i--) {
		__hns_roce_function_clear(hr_dev, i);

		if (i == 0)
			continue;

		ret = hns_roce_free_vf_resource(hr_dev, i);
		if (ret)
			ibdev_err(&hr_dev->ib_dev,
				  "failed to free vf resource, vf_id = %d, ret = %d.\n",
				  i, ret);
	}
}

static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
				      false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to clear extended doorbell info, ret = %d.\n",
			  ret);

	return ret;
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}

static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		hr_dev->func_num = 1;
		return 0;
	}

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
				      true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		hr_dev->func_num = 1;
		return ret;
	}

	hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
	hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);

	return 0;
}

static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
					u64 *stats, u32 port, int *num_counters)
{
#define CNT_PER_DESC 3
	struct hns_roce_cmq_desc *desc;
	int bd_idx, cnt_idx;
	__le64 *cnt_data;
	int desc_num;
	int ret;
	int i;

	if (port > hr_dev->caps.num_ports)
		return -EINVAL;

	desc_num = DIV_ROUND_UP(HNS_ROCE_HW_CNT_TOTAL, CNT_PER_DESC);
	desc = kcalloc(desc_num, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (i = 0; i < desc_num; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_COUNTER, true);
		if (i != desc_num - 1)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, desc_num);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to get counter, ret = %d.\n", ret);
		goto err_out;
	}

	for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
		bd_idx = i / CNT_PER_DESC;
		if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) &&
		    bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC)
			break;

		cnt_data = (__le64 *)&desc[bd_idx].data[0];
		cnt_idx = i % CNT_PER_DESC;
		stats[i] = le64_to_cpu(cnt_data[cnt_idx]);
	}
	*num_counters = i;

err_out:
	kfree(desc);
	return ret;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	u32 clock_cycles_of_1us;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
	else
		clock_cycles_of_1us = HNS_ROCE_1US_CFG;

	hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
	hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

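/*
 * Query the per-function resource quotas. The values reported by the
 * firmware appear to be totals shared across functions, so each one is
 * divided by func_num to obtain this function's share (for a VF,
 * func_num is 1).
 */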
load_func_res_caps(struct hns_roce_dev * hr_dev,bool is_vf)1691 static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1692 {
1693 struct hns_roce_cmq_desc desc[2];
1694 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1695 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1696 struct hns_roce_caps *caps = &hr_dev->caps;
1697 enum hns_roce_opcode_type opcode;
1698 u32 func_num;
1699 int ret;
1700
1701 if (is_vf) {
1702 opcode = HNS_ROCE_OPC_QUERY_VF_RES;
1703 func_num = 1;
1704 } else {
1705 opcode = HNS_ROCE_OPC_QUERY_PF_RES;
1706 func_num = hr_dev->func_num;
1707 }
1708
1709 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
1710 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1711 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
1712
1713 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1714 if (ret)
1715 return ret;
1716
1717 caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
1718 caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
1719 caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
1720 caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
1721 caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
1722 caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
1723 caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
1724 caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
1725
1726 if (is_vf) {
1727 caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
1728 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
1729 func_num;
1730 } else {
1731 caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
1732 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
1733 func_num;
1734 }
1735
1736 return 0;
1737 }
1738
load_pf_timer_res_caps(struct hns_roce_dev * hr_dev)1739 static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
1740 {
1741 struct hns_roce_cmq_desc desc;
1742 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1743 struct hns_roce_caps *caps = &hr_dev->caps;
1744 int ret;
1745
1746 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1747 true);
1748
1749 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1750 if (ret)
1751 return ret;
1752
1753 caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
1754 caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
1755
1756 return 0;
1757 }
1758
1759 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1760 {
1761 struct device *dev = hr_dev->dev;
1762 int ret;
1763
1764 ret = load_func_res_caps(hr_dev, false);
1765 if (ret) {
1766 dev_err(dev, "failed to load pf res caps, ret = %d.\n", ret);
1767 return ret;
1768 }
1769
1770 ret = load_pf_timer_res_caps(hr_dev);
1771 if (ret)
1772 dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
1773 ret);
1774
1775 return ret;
1776 }
1777
1778 static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
1779 {
1780 struct device *dev = hr_dev->dev;
1781 int ret;
1782
1783 ret = load_func_res_caps(hr_dev, true);
1784 if (ret)
1785 dev_err(dev, "failed to load vf res caps, ret = %d.\n", ret);
1786
1787 return ret;
1788 }
1789
1790 static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1791 u32 vf_id)
1792 {
1793 struct hns_roce_vf_switch *swt;
1794 struct hns_roce_cmq_desc desc;
1795 int ret;
1796
1797 swt = (struct hns_roce_vf_switch *)desc.data;
1798 hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1799 swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1800 hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
1801 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1802 if (ret)
1803 return ret;
1804
1805 desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1806 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1807 hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
1808 hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
1809 hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);
1810
1811 return hns_roce_cmq_send(hr_dev, &desc, 1);
1812 }
1813
1814 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
1815 {
1816 u32 vf_id;
1817 int ret;
1818
1819 for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
1820 ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
1821 if (ret)
1822 return ret;
1823 }
1824 return 0;
1825 }
1826
1827 static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
1828 {
1829 struct hns_roce_cmq_desc desc[2];
1830 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1831 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1832 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1833 struct hns_roce_caps *caps = &hr_dev->caps;
1834
1835 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1836 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1837 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1838
1839 hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
1840
1841 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
1842 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
1843 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
1844 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
1845 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
1846 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
1847 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
1848 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
1849 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
1850 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
1851 hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
1852 hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
1853 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
1854 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
1855
1856 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1857 hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
1858 hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
1859 vf_id * caps->gmv_bt_num);
1860 } else {
1861 hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
1862 hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
1863 vf_id * caps->sgid_bt_num);
1864 hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
1865 hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
1866 vf_id * caps->smac_bt_num);
1867 }
1868
1869 return hns_roce_cmq_send(hr_dev, desc, 2);
1870 }
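/*
 * Each VF receives a contiguous slice of every BT resource: NUM entries
 * starting at index vf_id * NUM. E.g. (illustrative numbers) with
 * caps->qpc_bt_num == 64, vf 0 owns QPC BT entries [0, 63] and vf 1 owns
 * [64, 127], so slices never overlap.
 */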
1871
1872 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1873 {
1874 u32 func_num = max_t(u32, 1, hr_dev->func_num);
1875 u32 vf_id;
1876 int ret;
1877
1878 for (vf_id = 0; vf_id < func_num; vf_id++) {
1879 ret = config_vf_hem_resource(hr_dev, vf_id);
1880 if (ret) {
1881 dev_err(hr_dev->dev,
1882 "failed to config vf-%u hem res, ret = %d.\n",
1883 vf_id, ret);
1884 return ret;
1885 }
1886 }
1887
1888 return 0;
1889 }
1890
1891 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1892 {
1893 struct hns_roce_cmq_desc desc;
1894 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1895 struct hns_roce_caps *caps = &hr_dev->caps;
1896
1897 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1898
1899 hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
1900 caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1901 hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
1902 caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1903 hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
1904 to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
1905
1906 hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
1907 caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1908 hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
1909 caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1910 hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
1911 to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
1912
1913 hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
1914 caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1915 hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
1916 caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1917 hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
1918 to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
1919
1920 hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
1921 caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1922 hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
1923 caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1924 hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
1925 to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
1926
1927 hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
1928 caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1929 hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
1930 caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1931 hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
1932 to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
1933
1934 return hns_roce_cmq_send(hr_dev, &desc, 1);
1935 }
1936
1937 static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
1938 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
1939 {
1940 u64 obj_per_chunk;
1941 u64 bt_chunk_size = PAGE_SIZE;
1942 u64 buf_chunk_size = PAGE_SIZE;
1943 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
1944
1945 *buf_page_size = 0;
1946 *bt_page_size = 0;
1947
1948 switch (hop_num) {
1949 case 3:
1950 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1951 (bt_chunk_size / BA_BYTE_LEN) *
1952 (bt_chunk_size / BA_BYTE_LEN) *
1953 obj_per_chunk_default;
1954 break;
1955 case 2:
1956 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1957 (bt_chunk_size / BA_BYTE_LEN) *
1958 obj_per_chunk_default;
1959 break;
1960 case 1:
1961 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1962 obj_per_chunk_default;
1963 break;
1964 case HNS_ROCE_HOP_NUM_0:
1965 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
1966 break;
1967 default:
1968 pr_err("table %u not support hop_num = %u!\n", hem_type,
1969 hop_num);
1970 return;
1971 }
1972
1973 if (hem_type >= HEM_TYPE_MTT)
1974 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1975 else
1976 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1977 }
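/*
 * The result is an extra page shift on top of the base page size, chosen so
 * that ctx_bt_num root BTs can cover obj_num objects. Worked example with
 * illustrative values, PAGE_SIZE == 4096 and BA_BYTE_LEN == 8: one 4 KB chunk
 * holds 512 BAs; with hop_num == 1, ctx_bt_num == 1 and obj_size == 256,
 * obj_per_chunk = 1 * 512 * (4096 / 256) = 8192. For obj_num == 1048576,
 * DIV_ROUND_UP(1048576, 8192) = 128, so the page size field becomes
 * ilog2(128) = 7, i.e. 4 KB << 7 = 512 KB chunks.
 */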
1978
1979 static void set_hem_page_size(struct hns_roce_dev *hr_dev)
1980 {
1981 struct hns_roce_caps *caps = &hr_dev->caps;
1982
1983 /* EQ */
1984 caps->eqe_ba_pg_sz = 0;
1985 caps->eqe_buf_pg_sz = 0;
1986
1987 /* Link Table */
1988 caps->llm_buf_pg_sz = 0;
1989
1990 /* MR */
1991 caps->mpt_ba_pg_sz = 0;
1992 caps->mpt_buf_pg_sz = 0;
1993 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
1994 caps->pbl_buf_pg_sz = 0;
1995 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
1996 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
1997 HEM_TYPE_MTPT);
1998
1999 /* QP */
2000 caps->qpc_ba_pg_sz = 0;
2001 caps->qpc_buf_pg_sz = 0;
2002 caps->qpc_timer_ba_pg_sz = 0;
2003 caps->qpc_timer_buf_pg_sz = 0;
2004 caps->sccc_ba_pg_sz = 0;
2005 caps->sccc_buf_pg_sz = 0;
2006 caps->mtt_ba_pg_sz = 0;
2007 caps->mtt_buf_pg_sz = 0;
2008 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2009 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2010 HEM_TYPE_QPC);
2011
2012 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
2013 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
2014 caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
2015 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2016
2017 /* CQ */
2018 caps->cqc_ba_pg_sz = 0;
2019 caps->cqc_buf_pg_sz = 0;
2020 caps->cqc_timer_ba_pg_sz = 0;
2021 caps->cqc_timer_buf_pg_sz = 0;
2022 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
2023 caps->cqe_buf_pg_sz = 0;
2024 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2025 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2026 HEM_TYPE_CQC);
2027 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
2028 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2029
2030 /* SRQ */
2031 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2032 caps->srqc_ba_pg_sz = 0;
2033 caps->srqc_buf_pg_sz = 0;
2034 caps->srqwqe_ba_pg_sz = 0;
2035 caps->srqwqe_buf_pg_sz = 0;
2036 caps->idx_ba_pg_sz = 0;
2037 caps->idx_buf_pg_sz = 0;
2038 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
2039 caps->srqc_hop_num, caps->srqc_bt_num,
2040 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
2041 HEM_TYPE_SRQC);
2042 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2043 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2044 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2045 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
2046 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
2047 &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2048 }
2049
2050 /* GMV */
2051 caps->gmv_ba_pg_sz = 0;
2052 caps->gmv_buf_pg_sz = 0;
2053 }
2054
2055 /* Apply all loaded caps before setting to hardware */
2056 static void apply_func_caps(struct hns_roce_dev *hr_dev)
2057 {
2058 struct hns_roce_caps *caps = &hr_dev->caps;
2059 struct hns_roce_v2_priv *priv = hr_dev->priv;
2060
2061 /* The following configurations don't need to be queried from firmware. */
2062 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2063 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2064 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2065
2066 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2067 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2068 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2069
2070 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2071 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2072
2073 if (!caps->num_comp_vectors)
2074 caps->num_comp_vectors =
2075 min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
2076 (u32)priv->handle->rinfo.num_vectors -
2077 (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
2078
2079 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2080 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
2081 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2082 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2083
2084 /* The following configurations will be overwritten */
2085 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2086 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2087 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2088
2089 /* The following configurations are not queried from firmware */
2090 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2091
2092 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2093 caps->gid_table_len[0] = caps->gmv_bt_num *
2094 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2095
2096 caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
2097 caps->gmv_entry_sz);
2098 } else {
2099 u32 func_num = max_t(u32, 1, hr_dev->func_num);
2100
2101 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
2102 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2103 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2104 caps->gid_table_len[0] /= func_num;
2105 }
2106
2107 if (hr_dev->is_vf) {
2108 caps->default_aeq_arm_st = 0x3;
2109 caps->default_ceq_arm_st = 0x3;
2110 caps->default_ceq_max_cnt = 0x1;
2111 caps->default_ceq_period = 0x10;
2112 caps->default_aeq_max_cnt = 0x1;
2113 caps->default_aeq_period = 0x10;
2114 }
2115
2116 set_hem_page_size(hr_dev);
2117 }
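/*
 * The completion-vector budget above reserves the AEQ and abnormal vectors
 * out of the MSI-X pool. Sketch with assumed values of 1 for both
 * HNS_ROCE_V2_AEQE_VEC_NUM and HNS_ROCE_V2_ABNORMAL_VEC_NUM: given 35
 * vectors and eqc_bt_num == 64, num_comp_vectors = min(64 - 1, 35 - 2) = 33
 * usable CEQs.
 */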
2118
2119 static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
2120 {
2121 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
2122 struct hns_roce_caps *caps = &hr_dev->caps;
2123 struct hns_roce_query_pf_caps_a *resp_a;
2124 struct hns_roce_query_pf_caps_b *resp_b;
2125 struct hns_roce_query_pf_caps_c *resp_c;
2126 struct hns_roce_query_pf_caps_d *resp_d;
2127 struct hns_roce_query_pf_caps_e *resp_e;
2128 enum hns_roce_opcode_type cmd;
2129 int ctx_hop_num;
2130 int pbl_hop_num;
2131 int ret;
2132 int i;
2133
2134 cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
2135 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
2136
2137 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
2138 hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
2139 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
2140 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2141 else
2142 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2143 }
2144
2145 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
2146 if (ret)
2147 return ret;
2148
2149 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
2150 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
2151 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
2152 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
2153 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
2154
2155 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
2156 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
2157 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
2158 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
2159 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
2160 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
2161 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
2162 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
2163 caps->num_other_vectors = resp_a->num_other_vectors;
2164 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
2165 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
2166
2167 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
2168 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
2169 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
2170 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
2171 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
2172 caps->idx_entry_sz = resp_b->idx_entry_sz;
2173 caps->sccc_sz = resp_b->sccc_sz;
2174 caps->max_mtu = resp_b->max_mtu;
2175 caps->min_cqes = resp_b->min_cqes;
2176 caps->min_wqes = resp_b->min_wqes;
2177 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
2178 caps->pkey_table_len[0] = resp_b->pkey_table_len;
2179 caps->phy_num_uars = resp_b->phy_num_uars;
2180 ctx_hop_num = resp_b->ctx_hop_num;
2181 pbl_hop_num = resp_b->pbl_hop_num;
2182
2183 caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS);
2184
2185 caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS);
2186 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2187 HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2188
2189 caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
2190 caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
2191 caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
2192 caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS);
2193 caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
2194 caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
2195 caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
2196 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2197 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2198
2199 caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
2200 caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE);
2201 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2202 caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
2203 caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
2204 caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
2205 caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
2206 caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
2207 caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
2208 caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS);
2209
2210 caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
2211 caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
2212 caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
2213 caps->reserved_xrcds = hr_reg_read(resp_e, PF_CAPS_E_RSV_XRCDS);
2214 caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
2215 caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
2216
2217 caps->qpc_hop_num = ctx_hop_num;
2218 caps->sccc_hop_num = ctx_hop_num;
2219 caps->srqc_hop_num = ctx_hop_num;
2220 caps->cqc_hop_num = ctx_hop_num;
2221 caps->mpt_hop_num = ctx_hop_num;
2222 caps->mtt_hop_num = pbl_hop_num;
2223 caps->cqe_hop_num = pbl_hop_num;
2224 caps->srqwqe_hop_num = pbl_hop_num;
2225 caps->idx_hop_num = pbl_hop_num;
2226 caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM);
2227 caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM);
2228 caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM);
2229
2230 if (!(caps->page_size_cap & PAGE_SIZE))
2231 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
2232
2233 if (!hr_dev->is_vf) {
2234 caps->cqe_sz = resp_a->cqe_sz;
2235 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
2236 caps->default_aeq_arm_st =
2237 hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST);
2238 caps->default_ceq_arm_st =
2239 hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST);
2240 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2241 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2242 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2243 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2244 }
2245
2246 return 0;
2247 }
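/*
 * Many capability fields arrive log2-encoded and are expanded with
 * "1 << value" above; e.g. an illustrative PF_CAPS_C_NUM_QPS of 20 yields
 * num_qps = 1 << 20 = 1048576. The HNS_ROCE_QUERY_PF_CAPS_CMD_NUM (five)
 * descriptors are chained with FLAG_NEXT so firmware fills resp_a..resp_e
 * in a single exchange.
 */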
2248
2249 static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
2250 {
2251 struct hns_roce_cmq_desc desc;
2252 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2253
2254 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2255 false);
2256
2257 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
2258 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
2259
2260 return hns_roce_cmq_send(hr_dev, &desc, 1);
2261 }
2262
2263 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2264 {
2265 struct hns_roce_caps *caps = &hr_dev->caps;
2266 int ret;
2267
2268 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
2269 return 0;
2270
2271 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
2272 caps->qpc_sz);
2273 if (ret) {
2274 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2275 return ret;
2276 }
2277
2278 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
2279 caps->sccc_sz);
2280 if (ret)
2281 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2282
2283 return ret;
2284 }
2285
2286 static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
2287 {
2288 struct device *dev = hr_dev->dev;
2289 int ret;
2290
2291 hr_dev->func_num = 1;
2292
2293 ret = hns_roce_query_caps(hr_dev);
2294 if (ret) {
2295 dev_err(dev, "failed to query VF caps, ret = %d.\n", ret);
2296 return ret;
2297 }
2298
2299 ret = hns_roce_query_vf_resource(hr_dev);
2300 if (ret) {
2301 dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
2302 return ret;
2303 }
2304
2305 apply_func_caps(hr_dev);
2306
2307 ret = hns_roce_v2_set_bt(hr_dev);
2308 if (ret)
2309 dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
2310
2311 return ret;
2312 }
2313
2314 static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
2315 {
2316 struct device *dev = hr_dev->dev;
2317 int ret;
2318
2319 ret = hns_roce_query_func_info(hr_dev);
2320 if (ret) {
2321 dev_err(dev, "failed to query func info, ret = %d.\n", ret);
2322 return ret;
2323 }
2324
2325 ret = hns_roce_config_global_param(hr_dev);
2326 if (ret) {
2327 dev_err(dev, "failed to config global param, ret = %d.\n", ret);
2328 return ret;
2329 }
2330
2331 ret = hns_roce_set_vf_switch_param(hr_dev);
2332 if (ret) {
2333 dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
2334 return ret;
2335 }
2336
2337 ret = hns_roce_query_caps(hr_dev);
2338 if (ret) {
2339 dev_err(dev, "failed to query PF caps, ret = %d.\n", ret);
2340 return ret;
2341 }
2342
2343 ret = hns_roce_query_pf_resource(hr_dev);
2344 if (ret) {
2345 dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
2346 return ret;
2347 }
2348
2349 apply_func_caps(hr_dev);
2350
2351 ret = hns_roce_alloc_vf_resource(hr_dev);
2352 if (ret) {
2353 dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
2354 return ret;
2355 }
2356
2357 ret = hns_roce_v2_set_bt(hr_dev);
2358 if (ret) {
2359 dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
2360 return ret;
2361 }
2362
2363 /* Configure the size of QPC, SCCC, etc. */
2364 return hns_roce_config_entry_size(hr_dev);
2365 }
2366
2367 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2368 {
2369 struct device *dev = hr_dev->dev;
2370 int ret;
2371
2372 ret = hns_roce_cmq_query_hw_info(hr_dev);
2373 if (ret) {
2374 dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
2375 return ret;
2376 }
2377
2378 ret = hns_roce_query_fw_ver(hr_dev);
2379 if (ret) {
2380 dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
2381 return ret;
2382 }
2383
2384 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2385 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2386
2387 if (hr_dev->is_vf)
2388 return hns_roce_v2_vf_profile(hr_dev);
2389 else
2390 return hns_roce_v2_pf_profile(hr_dev);
2391 }
2392
2393 static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
2394 {
2395 u32 i, next_ptr, page_num;
2396 __le64 *entry = cfg_buf;
2397 dma_addr_t addr;
2398 u64 val;
2399
2400 page_num = data_buf->npages;
2401 for (i = 0; i < page_num; i++) {
2402 addr = hns_roce_buf_page(data_buf, i);
2403 if (i == (page_num - 1))
2404 next_ptr = 0;
2405 else
2406 next_ptr = i + 1;
2407
2408 val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
2409 entry[i] = cpu_to_le64(val);
2410 }
2411 }
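/*
 * Each 64-bit entry packs a data page's DMA address together with the index
 * of the next page, so the config table describes a singly linked list over
 * the data buffer. For a 3-page buffer the next pointers are 1, 2, 0: the
 * last entry points back to entry 0, closing the ring.
 */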
2412
2413 static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
2414 struct hns_roce_link_table *table)
2415 {
2416 struct hns_roce_cmq_desc desc[2];
2417 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
2418 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
2419 struct hns_roce_buf *buf = table->buf;
2420 enum hns_roce_opcode_type opcode;
2421 dma_addr_t addr;
2422
2423 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2424 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
2425 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2426 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
2427
2428 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
2429 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
2430 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
2431 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
2432 hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
2433
2434 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
2435 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
2436 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
2437 hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
2438 hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
2439
2440 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
2441 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
2442 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
2443 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
2444
2445 return hns_roce_cmq_send(hr_dev, desc, 2);
2446 }
2447
2448 static struct hns_roce_link_table *
2449 alloc_link_table_buf(struct hns_roce_dev *hr_dev)
2450 {
2451 u16 total_sl = hr_dev->caps.sl_num * hr_dev->func_num;
2452 struct hns_roce_v2_priv *priv = hr_dev->priv;
2453 struct hns_roce_link_table *link_tbl;
2454 u32 pg_shift, size, min_size;
2455
2456 link_tbl = &priv->ext_llm;
2457 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
2458 size = hr_dev->caps.num_qps * hr_dev->func_num *
2459 HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
2460 min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(total_sl) << pg_shift;
2461
2462 /* Alloc data table */
2463 size = max(size, min_size);
2464 link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
2465 if (IS_ERR(link_tbl->buf))
2466 return ERR_PTR(-ENOMEM);
2467
2468 /* Alloc config table */
2469 size = link_tbl->buf->npages * sizeof(u64);
2470 link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
2471 &link_tbl->table.map,
2472 GFP_KERNEL);
2473 if (!link_tbl->table.buf) {
2474 hns_roce_buf_free(hr_dev, link_tbl->buf);
2475 return ERR_PTR(-ENOMEM);
2476 }
2477
2478 return link_tbl;
2479 }
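/*
 * Sizing sketch: the data table needs one entry of
 * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ bytes per QP per function, but never less than
 * HNS_ROCE_EXT_LLM_MIN_PAGES(total_sl) pages. The config table then needs one
 * u64 descriptor per data page, which is why its size is npages * sizeof(u64).
 */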
2480
2481 static void free_link_table_buf(struct hns_roce_dev *hr_dev,
2482 struct hns_roce_link_table *tbl)
2483 {
2484 if (tbl->buf) {
2485 u32 size = tbl->buf->npages * sizeof(u64);
2486
2487 dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
2488 tbl->table.map);
2489 }
2490
2491 hns_roce_buf_free(hr_dev, tbl->buf);
2492 }
2493
2494 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
2495 {
2496 struct hns_roce_link_table *link_tbl;
2497 int ret;
2498
2499 link_tbl = alloc_link_table_buf(hr_dev);
2500 if (IS_ERR(link_tbl))
2501 return -ENOMEM;
2502
2503 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
2504 ret = -EINVAL;
2505 goto err_alloc;
2506 }
2507
2508 config_llm_table(link_tbl->buf, link_tbl->table.buf);
2509 ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
2510 if (ret)
2511 goto err_alloc;
2512
2513 return 0;
2514
2515 err_alloc:
2516 free_link_table_buf(hr_dev, link_tbl);
2517 return ret;
2518 }
2519
2520 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
2521 {
2522 struct hns_roce_v2_priv *priv = hr_dev->priv;
2523
2524 free_link_table_buf(hr_dev, &priv->ext_llm);
2525 }
2526
2527 static void free_dip_list(struct hns_roce_dev *hr_dev)
2528 {
2529 struct hns_roce_dip *hr_dip;
2530 struct hns_roce_dip *tmp;
2531 unsigned long flags;
2532
2533 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
2534
2535 list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
2536 list_del(&hr_dip->node);
2537 kfree(hr_dip);
2538 }
2539
2540 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
2541 }
2542
2543 static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
2544 {
2545 struct hns_roce_v2_priv *priv = hr_dev->priv;
2546 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2547 struct ib_device *ibdev = &hr_dev->ib_dev;
2548 struct hns_roce_pd *hr_pd;
2549 struct ib_pd *pd;
2550
2551 hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
2552 if (ZERO_OR_NULL_PTR(hr_pd))
2553 return NULL;
2554 pd = &hr_pd->ibpd;
2555 pd->device = ibdev;
2556
2557 if (hns_roce_alloc_pd(pd, NULL)) {
2558 ibdev_err(ibdev, "failed to create pd for free mr.\n");
2559 kfree(hr_pd);
2560 return NULL;
2561 }
2562 free_mr->rsv_pd = to_hr_pd(pd);
2563 free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev;
2564 free_mr->rsv_pd->ibpd.uobject = NULL;
2565 free_mr->rsv_pd->ibpd.__internal_mr = NULL;
2566 atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0);
2567
2568 return pd;
2569 }
2570
2571 static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
2572 {
2573 struct hns_roce_v2_priv *priv = hr_dev->priv;
2574 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2575 struct ib_device *ibdev = &hr_dev->ib_dev;
2576 struct ib_cq_init_attr cq_init_attr = {};
2577 struct hns_roce_cq *hr_cq;
2578 struct ib_cq *cq;
2579
2580 cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
2581
2582 hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
2583 if (ZERO_OR_NULL_PTR(hr_cq))
2584 return NULL;
2585
2586 cq = &hr_cq->ib_cq;
2587 cq->device = ibdev;
2588
2589 if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) {
2590 ibdev_err(ibdev, "failed to create cq for free mr.\n");
2591 kfree(hr_cq);
2592 return NULL;
2593 }
2594 free_mr->rsv_cq = to_hr_cq(cq);
2595 free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev;
2596 free_mr->rsv_cq->ib_cq.uobject = NULL;
2597 free_mr->rsv_cq->ib_cq.comp_handler = NULL;
2598 free_mr->rsv_cq->ib_cq.event_handler = NULL;
2599 free_mr->rsv_cq->ib_cq.cq_context = NULL;
2600 atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0);
2601
2602 return cq;
2603 }
2604
2605 static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
2606 struct ib_qp_init_attr *init_attr, int i)
2607 {
2608 struct hns_roce_v2_priv *priv = hr_dev->priv;
2609 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2610 struct ib_device *ibdev = &hr_dev->ib_dev;
2611 struct hns_roce_qp *hr_qp;
2612 struct ib_qp *qp;
2613 int ret;
2614
2615 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
2616 if (ZERO_OR_NULL_PTR(hr_qp))
2617 return -ENOMEM;
2618
2619 qp = &hr_qp->ibqp;
2620 qp->device = ibdev;
2621
2622 ret = hns_roce_create_qp(qp, init_attr, NULL);
2623 if (ret) {
2624 ibdev_err(ibdev, "failed to create qp for free mr.\n");
2625 kfree(hr_qp);
2626 return ret;
2627 }
2628
2629 free_mr->rsv_qp[i] = hr_qp;
2630 free_mr->rsv_qp[i]->ibqp.recv_cq = cq;
2631 free_mr->rsv_qp[i]->ibqp.send_cq = cq;
2632
2633 return 0;
2634 }
2635
2636 static void free_mr_exit(struct hns_roce_dev *hr_dev)
2637 {
2638 struct hns_roce_v2_priv *priv = hr_dev->priv;
2639 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2640 struct ib_qp *qp;
2641 int i;
2642
2643 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2644 if (free_mr->rsv_qp[i]) {
2645 qp = &free_mr->rsv_qp[i]->ibqp;
2646 hns_roce_v2_destroy_qp(qp, NULL);
2647 kfree(free_mr->rsv_qp[i]);
2648 free_mr->rsv_qp[i] = NULL;
2649 }
2650 }
2651
2652 if (free_mr->rsv_cq) {
2653 hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL);
2654 kfree(free_mr->rsv_cq);
2655 free_mr->rsv_cq = NULL;
2656 }
2657
2658 if (free_mr->rsv_pd) {
2659 hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL);
2660 kfree(free_mr->rsv_pd);
2661 free_mr->rsv_pd = NULL;
2662 }
2663 }
2664
2665 static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
2666 {
2667 struct hns_roce_v2_priv *priv = hr_dev->priv;
2668 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2669 struct ib_qp_init_attr qp_init_attr = {};
2670 struct ib_pd *pd;
2671 struct ib_cq *cq;
2672 int ret;
2673 int i;
2674
2675 pd = free_mr_init_pd(hr_dev);
2676 if (!pd)
2677 return -ENOMEM;
2678
2679 cq = free_mr_init_cq(hr_dev);
2680 if (!cq) {
2681 ret = -ENOMEM;
2682 goto create_failed_cq;
2683 }
2684
2685 qp_init_attr.qp_type = IB_QPT_RC;
2686 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2687 qp_init_attr.send_cq = cq;
2688 qp_init_attr.recv_cq = cq;
2689 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2690 qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
2691 qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
2692 qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
2693 qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
2694
2695 ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i);
2696 if (ret)
2697 goto create_failed_qp;
2698 }
2699
2700 return 0;
2701
2702 create_failed_qp:
2703 for (i--; i >= 0; i--) {
2704 hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL);
2705 kfree(free_mr->rsv_qp[i]);
2706 }
2707 hns_roce_destroy_cq(cq, NULL);
2708 kfree(cq);
2709
2710 create_failed_cq:
2711 hns_roce_dealloc_pd(pd, NULL);
2712 kfree(pd);
2713
2714 return ret;
2715 }
2716
2717 static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
2718 struct ib_qp_attr *attr, int sl_num)
2719 {
2720 struct hns_roce_v2_priv *priv = hr_dev->priv;
2721 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2722 struct ib_device *ibdev = &hr_dev->ib_dev;
2723 struct hns_roce_qp *hr_qp;
2724 int loopback;
2725 int mask;
2726 int ret;
2727
2728 hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
2729 hr_qp->free_mr_en = 1;
2730 hr_qp->ibqp.device = ibdev;
2731 hr_qp->ibqp.qp_type = IB_QPT_RC;
2732
2733 mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
2734 attr->qp_state = IB_QPS_INIT;
2735 attr->port_num = 1;
2736 attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
2737 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2738 IB_QPS_INIT, NULL);
2739 if (ret) {
2740 ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
2741 ret);
2742 return ret;
2743 }
2744
2745 loopback = hr_dev->loop_idc;
2746 /* Set qpc lbi = 1 to indicate loopback IO */
2747 hr_dev->loop_idc = 1;
2748
2749 mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
2750 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
2751 attr->qp_state = IB_QPS_RTR;
2752 attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2753 attr->path_mtu = IB_MTU_256;
2754 attr->dest_qp_num = hr_qp->qpn;
2755 attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2756
2757 rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
2758
2759 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2760 IB_QPS_RTR, NULL);
2761 hr_dev->loop_idc = loopback;
2762 if (ret) {
2763 ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
2764 ret);
2765 return ret;
2766 }
2767
2768 mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
2769 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
2770 attr->qp_state = IB_QPS_RTS;
2771 attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2772 attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
2773 attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
2774 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
2775 IB_QPS_RTS, NULL);
2776 if (ret)
2777 ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
2778 ret);
2779
2780 return ret;
2781 }
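/*
 * This walks the standard RC state ladder INIT -> RTR -> RTS, but with
 * dest_qp_num set to the QP's own number and loop_idc forced to 1 while the
 * RTR transition is programmed, so every WQE posted on the reserved QP loops
 * back through the local port instead of going out on the wire.
 */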
2782
2783 static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
2784 {
2785 struct hns_roce_v2_priv *priv = hr_dev->priv;
2786 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2787 struct ib_qp_attr attr = {};
2788 int ret;
2789 int i;
2790
2791 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
2792 rdma_ah_set_static_rate(&attr.ah_attr, 3);
2793 rdma_ah_set_port_num(&attr.ah_attr, 1);
2794
2795 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2796 ret = free_mr_modify_rsv_qp(hr_dev, &attr, i);
2797 if (ret)
2798 return ret;
2799 }
2800
2801 return 0;
2802 }
2803
2804 static int free_mr_init(struct hns_roce_dev *hr_dev)
2805 {
2806 struct hns_roce_v2_priv *priv = hr_dev->priv;
2807 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2808 int ret;
2809
2810 mutex_init(&free_mr->mutex);
2811
2812 ret = free_mr_alloc_res(hr_dev);
2813 if (ret)
2814 return ret;
2815
2816 ret = free_mr_modify_qp(hr_dev);
2817 if (ret)
2818 goto err_modify_qp;
2819
2820 return 0;
2821
2822 err_modify_qp:
2823 free_mr_exit(hr_dev);
2824
2825 return ret;
2826 }
2827
2828 static int get_hem_table(struct hns_roce_dev *hr_dev)
2829 {
2830 unsigned int qpc_count;
2831 unsigned int cqc_count;
2832 unsigned int gmv_count;
2833 int ret;
2834 int i;
2835
2836 /* Alloc memory for source address table buffer space chunk */
2837 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2838 gmv_count++) {
2839 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2840 if (ret)
2841 goto err_gmv_failed;
2842 }
2843
2844 if (hr_dev->is_vf)
2845 return 0;
2846
2847 /* Alloc memory for QPC Timer buffer space chunk */
2848 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2849 qpc_count++) {
2850 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2851 qpc_count);
2852 if (ret) {
2853 dev_err(hr_dev->dev, "QPC Timer get failed\n");
2854 goto err_qpc_timer_failed;
2855 }
2856 }
2857
2858 /* Alloc memory for CQC Timer buffer space chunk */
2859 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2860 cqc_count++) {
2861 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2862 cqc_count);
2863 if (ret) {
2864 dev_err(hr_dev->dev, "CQC Timer get failed\n");
2865 goto err_cqc_timer_failed;
2866 }
2867 }
2868
2869 return 0;
2870
2871 err_cqc_timer_failed:
2872 for (i = 0; i < cqc_count; i++)
2873 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2874
2875 err_qpc_timer_failed:
2876 for (i = 0; i < qpc_count; i++)
2877 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2878
2879 err_gmv_failed:
2880 for (i = 0; i < gmv_count; i++)
2881 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2882
2883 return ret;
2884 }
2885
2886 static void put_hem_table(struct hns_roce_dev *hr_dev)
2887 {
2888 int i;
2889
2890 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
2891 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2892
2893 if (hr_dev->is_vf)
2894 return;
2895
2896 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
2897 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2898
2899 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
2900 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2901 }
2902
2903 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2904 {
2905 int ret;
2906
2907 /* The hns ROCEE requires the extdb info to be cleared before use */
2908 ret = hns_roce_clear_extdb_list_info(hr_dev);
2909 if (ret)
2910 return ret;
2911
2912 ret = get_hem_table(hr_dev);
2913 if (ret)
2914 return ret;
2915
2916 if (hr_dev->is_vf)
2917 return 0;
2918
2919 ret = hns_roce_init_link_table(hr_dev);
2920 if (ret) {
2921 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
2922 goto err_llm_init_failed;
2923 }
2924
2925 return 0;
2926
2927 err_llm_init_failed:
2928 put_hem_table(hr_dev);
2929
2930 return ret;
2931 }
2932
2933 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2934 {
2935 hns_roce_function_clear(hr_dev);
2936
2937 if (!hr_dev->is_vf)
2938 hns_roce_free_link_table(hr_dev);
2939
2940 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
2941 free_dip_list(hr_dev);
2942 }
2943
2944 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
2945 struct hns_roce_mbox_msg *mbox_msg)
2946 {
2947 struct hns_roce_cmq_desc desc;
2948 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2949
2950 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2951
2952 mb->in_param_l = cpu_to_le32(mbox_msg->in_param);
2953 mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32);
2954 mb->out_param_l = cpu_to_le32(mbox_msg->out_param);
2955 mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32);
2956 mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd);
2957 mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 |
2958 mbox_msg->token);
2959
2960 return hns_roce_cmq_send(hr_dev, &desc, 1);
2961 }
2962
2963 static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
2964 u8 *complete_status)
2965 {
2966 struct hns_roce_mbox_status *mb_st;
2967 struct hns_roce_cmq_desc desc;
2968 unsigned long end;
2969 int ret = -EBUSY;
2970 u32 status;
2971 bool busy;
2972
2973 mb_st = (struct hns_roce_mbox_status *)desc.data;
2974 end = msecs_to_jiffies(timeout) + jiffies;
2975 while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
2976 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
2977 return -EIO;
2978
2979 status = 0;
2980 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
2981 true);
2982 ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
2983 if (!ret) {
2984 status = le32_to_cpu(mb_st->mb_status_hw_run);
2985 /* No pending message exists in ROCEE mbox. */
2986 if (!(status & MB_ST_HW_RUN_M))
2987 break;
2988 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2989 break;
2990 }
2991
2992 if (time_after(jiffies, end)) {
2993 dev_err_ratelimited(hr_dev->dev,
2994 "failed to wait mbox status 0x%x\n",
2995 status);
2996 return -ETIMEDOUT;
2997 }
2998
2999 cond_resched();
3000 ret = -EBUSY;
3001 }
3002
3003 if (!ret) {
3004 *complete_status = (u8)(status & MB_ST_COMPLETE_M);
3005 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
3006 /* Ignore all errors if the mbox is unavailable. */
3007 ret = 0;
3008 *complete_status = MB_ST_COMPLETE_M;
3009 }
3010
3011 return ret;
3012 }
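/*
 * Polling protocol sketch: HNS_ROCE_OPC_QUERY_MB_ST is issued repeatedly
 * until the hardware-run bit (MB_ST_HW_RUN_M) clears, the timeout expires, or
 * the mailbox becomes unavailable (e.g. across a reset, in which case errors
 * are deliberately ignored and a completed status is faked). The low bits of
 * the status word then carry the completion code returned through
 * *complete_status.
 */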
3013
3014 static int v2_post_mbox(struct hns_roce_dev *hr_dev,
3015 struct hns_roce_mbox_msg *mbox_msg)
3016 {
3017 u8 status = 0;
3018 int ret;
3019
3020 /* Wait for the mbox to be idle */
3021 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
3022 &status);
3023 if (unlikely(ret)) {
3024 dev_err_ratelimited(hr_dev->dev,
3025 "failed to check post mbox status = 0x%x, ret = %d.\n",
3026 status, ret);
3027 return ret;
3028 }
3029
3030 /* Post new message to mbox */
3031 ret = hns_roce_mbox_post(hr_dev, mbox_msg);
3032 if (ret)
3033 dev_err_ratelimited(hr_dev->dev,
3034 "failed to post mailbox, ret = %d.\n", ret);
3035
3036 return ret;
3037 }
3038
3039 static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev)
3040 {
3041 u8 status = 0;
3042 int ret;
3043
3044 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS,
3045 &status);
3046 if (!ret) {
3047 if (status != MB_ST_COMPLETE_SUCC)
3048 return -EBUSY;
3049 } else {
3050 dev_err_ratelimited(hr_dev->dev,
3051 "failed to check mbox status = 0x%x, ret = %d.\n",
3052 status, ret);
3053 }
3054
3055 return ret;
3056 }
3057
3058 static void copy_gid(void *dest, const union ib_gid *gid)
3059 {
3060 #define GID_SIZE 4
3061 const union ib_gid *src = gid;
3062 __le32 (*p)[GID_SIZE] = dest;
3063 int i;
3064
3065 if (!gid)
3066 src = &zgid;
3067
3068 for (i = 0; i < GID_SIZE; i++)
3069 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
3070 }
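/*
 * A 128-bit GID is written to hardware as four little-endian 32-bit words:
 * raw[0..3] fill the first word, raw[4..7] the second, and so on. Passing a
 * NULL gid stores zgid, which is how a table entry is cleared.
 */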
3071
3072 static int config_sgid_table(struct hns_roce_dev *hr_dev,
3073 int gid_index, const union ib_gid *gid,
3074 enum hns_roce_sgid_type sgid_type)
3075 {
3076 struct hns_roce_cmq_desc desc;
3077 struct hns_roce_cfg_sgid_tb *sgid_tb =
3078 (struct hns_roce_cfg_sgid_tb *)desc.data;
3079
3080 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
3081
3082 hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index);
3083 hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type);
3084
3085 copy_gid(&sgid_tb->vf_sgid_l, gid);
3086
3087 return hns_roce_cmq_send(hr_dev, &desc, 1);
3088 }
3089
3090 static int config_gmv_table(struct hns_roce_dev *hr_dev,
3091 int gid_index, const union ib_gid *gid,
3092 enum hns_roce_sgid_type sgid_type,
3093 const struct ib_gid_attr *attr)
3094 {
3095 struct hns_roce_cmq_desc desc[2];
3096 struct hns_roce_cfg_gmv_tb_a *tb_a =
3097 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
3098 struct hns_roce_cfg_gmv_tb_b *tb_b =
3099 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
3100
3101 u16 vlan_id = VLAN_CFI_MASK;
3102 u8 mac[ETH_ALEN] = {};
3103 int ret;
3104
3105 if (gid) {
3106 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
3107 if (ret)
3108 return ret;
3109 }
3110
3111 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
3112 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
3113
3114 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
3115
3116 copy_gid(&tb_a->vf_sgid_l, gid);
3117
3118 hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type);
3119 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK);
3120 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id);
3121
3122 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
3123
3124 hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]);
3125 hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index);
3126
3127 return hns_roce_cmq_send(hr_dev, desc, 2);
3128 }
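/*
 * VLAN_CFI_MASK (0x1000) doubles as a "no VLAN" sentinel here: valid VLAN IDs
 * are 12 bits, so VF_VLAN_EN is asserted only when rdma_read_gid_l2_fields()
 * reported an id below the mask; otherwise the initial sentinel survives and
 * VLAN tagging stays disabled for the entry.
 */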
3129
3130 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
3131 const union ib_gid *gid,
3132 const struct ib_gid_attr *attr)
3133 {
3134 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
3135 int ret;
3136
3137 if (gid) {
3138 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
3139 if (ipv6_addr_v4mapped((void *)gid))
3140 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
3141 else
3142 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
3143 } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
3144 sgid_type = GID_TYPE_FLAG_ROCE_V1;
3145 }
3146 }
3147
3148 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
3149 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
3150 else
3151 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
3152
3153 if (ret)
3154 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
3155 ret);
3156
3157 return ret;
3158 }
3159
3160 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
3161 const u8 *addr)
3162 {
3163 struct hns_roce_cmq_desc desc;
3164 struct hns_roce_cfg_smac_tb *smac_tb =
3165 (struct hns_roce_cfg_smac_tb *)desc.data;
3166 u16 reg_smac_h;
3167 u32 reg_smac_l;
3168
3169 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
3170
3171 reg_smac_l = *(u32 *)(&addr[0]);
3172 reg_smac_h = *(u16 *)(&addr[4]);
3173
3174 hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port);
3175 hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h);
3176 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
3177
3178 return hns_roce_cmq_send(hr_dev, &desc, 1);
3179 }
3180
3181 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
3182 struct hns_roce_v2_mpt_entry *mpt_entry,
3183 struct hns_roce_mr *mr)
3184 {
3185 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
3186 struct ib_device *ibdev = &hr_dev->ib_dev;
3187 dma_addr_t pbl_ba;
3188 int i, count;
3189
3190 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
3191 min_t(int, ARRAY_SIZE(pages), mr->npages),
3192 &pbl_ba);
3193 if (count < 1) {
3194 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
3195 count);
3196 return -ENOBUFS;
3197 }
3198
3199 /* Aligned to the hardware address access unit */
3200 for (i = 0; i < count; i++)
3201 pages[i] >>= 6;
3202
3203 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3204 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
3205 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
3206
3207 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
3208 hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
3209
3210 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
3211 hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
3212 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3213 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3214
3215 return 0;
3216 }
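/*
 * Address-unit sketch based on the shifts above: the PBL base address is
 * stored in 8-byte units (pbl_ba >> 3) and the inline page addresses in
 * 64-byte units (pages[i] >> 6), matching the hardware's address access
 * granularity; e.g. an assumed PA of 0x1f000 is written as
 * 0x1f000 >> 6 = 0x7c0.
 */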
3217
3218 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
3219 void *mb_buf, struct hns_roce_mr *mr)
3220 {
3221 struct hns_roce_v2_mpt_entry *mpt_entry;
3222
3223 mpt_entry = mb_buf;
3224 memset(mpt_entry, 0, sizeof(*mpt_entry));
3225
3226 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3227 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3228
3229 hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
3230 mr->access & IB_ACCESS_MW_BIND);
3231 hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
3232 mr->access & IB_ACCESS_REMOTE_ATOMIC);
3233 hr_reg_write_bool(mpt_entry, MPT_RR_EN,
3234 mr->access & IB_ACCESS_REMOTE_READ);
3235 hr_reg_write_bool(mpt_entry, MPT_RW_EN,
3236 mr->access & IB_ACCESS_REMOTE_WRITE);
3237 hr_reg_write_bool(mpt_entry, MPT_LW_EN,
3238 mr->access & IB_ACCESS_LOCAL_WRITE);
3239
3240 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3241 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3242 mpt_entry->lkey = cpu_to_le32(mr->key);
3243 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3244 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3245
3246 if (mr->type != MR_TYPE_MR)
3247 hr_reg_enable(mpt_entry, MPT_PA);
3248
3249 if (mr->type == MR_TYPE_DMA)
3250 return 0;
3251
3252 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
3253 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
3254
3255 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3256 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3257 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
3258
3259 return set_mtpt_pbl(hr_dev, mpt_entry, mr);
3260 }
3261
3262 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
3263 struct hns_roce_mr *mr, int flags,
3264 void *mb_buf)
3265 {
3266 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
3267 u32 mr_access_flags = mr->access;
3268 int ret = 0;
3269
3270 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3271 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3272
3273 if (flags & IB_MR_REREG_ACCESS) {
3274 hr_reg_write(mpt_entry, MPT_BIND_EN,
3275 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
3276 hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
3277 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
3278 hr_reg_write(mpt_entry, MPT_RR_EN,
3279 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
3280 hr_reg_write(mpt_entry, MPT_RW_EN,
3281 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
3282 hr_reg_write(mpt_entry, MPT_LW_EN,
3283 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
3284 }
3285
3286 if (flags & IB_MR_REREG_TRANS) {
3287 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3288 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3289 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3290 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3291
3292 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3293 }
3294
3295 return ret;
3296 }
3297
3298 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
3299 void *mb_buf, struct hns_roce_mr *mr)
3300 {
3301 struct ib_device *ibdev = &hr_dev->ib_dev;
3302 struct hns_roce_v2_mpt_entry *mpt_entry;
3303 dma_addr_t pbl_ba = 0;
3304
3305 mpt_entry = mb_buf;
3306 memset(mpt_entry, 0, sizeof(*mpt_entry));
3307
3308 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
3309 ibdev_err(ibdev, "failed to find frmr mtr.\n");
3310 return -ENOBUFS;
3311 }
3312
3313 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
3314 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3315
3316 hr_reg_enable(mpt_entry, MPT_RA_EN);
3317 hr_reg_enable(mpt_entry, MPT_R_INV_EN);
3318
3319 hr_reg_enable(mpt_entry, MPT_FRE);
3320 hr_reg_clear(mpt_entry, MPT_MR_MW);
3321 hr_reg_enable(mpt_entry, MPT_BPD);
3322 hr_reg_clear(mpt_entry, MPT_PA);
3323
3324 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
3325 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3326 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3327 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3328 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3329
3330 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3331
3332 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
3333 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
3334
3335 return 0;
3336 }
3337
3338 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
3339 {
3340 struct hns_roce_v2_mpt_entry *mpt_entry;
3341
3342 mpt_entry = mb_buf;
3343 memset(mpt_entry, 0, sizeof(*mpt_entry));
3344
3345 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
3346 hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
3347
3348 hr_reg_enable(mpt_entry, MPT_R_INV_EN);
3349 hr_reg_enable(mpt_entry, MPT_LW_EN);
3350
3351 hr_reg_enable(mpt_entry, MPT_MR_MW);
3352 hr_reg_enable(mpt_entry, MPT_BPD);
3353 hr_reg_clear(mpt_entry, MPT_PA);
3354 hr_reg_write(mpt_entry, MPT_BQP,
3355 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
3356
3357 mpt_entry->lkey = cpu_to_le32(mw->rkey);
3358
3359 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM,
3360 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
3361 mw->pbl_hop_num);
3362 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3363 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3364 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3365 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3366
3367 return 0;
3368 }
3369
3370 static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
3371 {
3372 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
3373 struct ib_device *ibdev = &hr_dev->ib_dev;
3374 const struct ib_send_wr *bad_wr;
3375 struct ib_rdma_wr rdma_wr = {};
3376 struct ib_send_wr *send_wr;
3377 int ret;
3378
3379 send_wr = &rdma_wr.wr;
3380 send_wr->opcode = IB_WR_RDMA_WRITE;
3381
3382 ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
3383 if (ret) {
3384 ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n",
3385 ret);
3386 return ret;
3387 }
3388
3389 return 0;
3390 }
3391
3392 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3393 struct ib_wc *wc);
3394
3395 static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
3396 {
3397 struct hns_roce_v2_priv *priv = hr_dev->priv;
3398 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
3399 struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
3400 struct ib_device *ibdev = &hr_dev->ib_dev;
3401 struct hns_roce_qp *hr_qp;
3402 unsigned long end;
3403 int cqe_cnt = 0;
3404 int npolled;
3405 int ret;
3406 int i;
3407
3408 /*
3409 * If device initialization is not complete, or the device is being
3410 * uninstalled, there is no need to perform the free mr operation.
3411 */
3412 if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
3413 priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
3414 hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
3415 return;
3416
3417 mutex_lock(&free_mr->mutex);
3418
3419 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
3420 hr_qp = free_mr->rsv_qp[i];
3421
3422 ret = free_mr_post_send_lp_wqe(hr_qp);
3423 if (ret) {
3424 ibdev_err(ibdev,
3425 "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
3426 hr_qp->qpn, ret);
3427 break;
3428 }
3429
3430 cqe_cnt++;
3431 }
3432
3433 end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
3434 while (cqe_cnt) {
3435 npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
3436 if (npolled < 0) {
3437 ibdev_err(ibdev,
3438 "failed to poll cqe for free mr, remain %d cqe.\n",
3439 cqe_cnt);
3440 goto out;
3441 }
3442
3443 if (time_after(jiffies, end)) {
3444 ibdev_err(ibdev,
3445 "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
3446 cqe_cnt);
3447 goto out;
3448 }
3449 cqe_cnt -= npolled;
3450 }
3451
3452 out:
3453 mutex_unlock(&free_mr->mutex);
3454 }
3455
3456 static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
3457 {
3458 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
3459 free_mr_send_cmd_to_hw(hr_dev);
3460 }
3461
3462 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3463 {
3464 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
3465 }
3466
3467 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3468 {
3469 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3470
3471 /* A CQE is valid only when its owner bit is the inverse of the cq_depth bit of cons_idx */
3472 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
3473 NULL;
3474 }
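/*
 * Illustrative note (added, not driver code): the owner-bit test above
 * works per lap of the ring. Assuming cq_depth = 64 (so ib_cq.cqe = 63),
 * for cons_index 0..63 (the first lap) n & cq_depth == 0 and a CQE is
 * returned only when its owner bit is 1; for cons_index 64..127 (the
 * second lap) n & cq_depth != 0 and a CQE is returned only when its owner
 * bit is 0. Hardware flips the owner bit it writes on every lap, so a
 * stale entry from the previous lap is never mistaken for a new one.
 */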
3475
3476 static inline void update_cq_db(struct hns_roce_dev *hr_dev,
3477 struct hns_roce_cq *hr_cq)
3478 {
3479 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
3480 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
3481 } else {
3482 struct hns_roce_v2_db cq_db = {};
3483
3484 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3485 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
3486 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3487 hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);
3488
3489 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3490 }
3491 }
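/*
 * Illustrative note (added, not driver code): with a record doorbell the
 * driver only stores the new consumer index to a memory location that the
 * hardware reads, avoiding an MMIO write per poll. Otherwise a doorbell
 * is assembled field by field (tag = cqn, command, consumer index,
 * command sequence number) and written to the CQ doorbell register with
 * hns_roce_write64().
 */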
3492
3493 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3494 struct hns_roce_srq *srq)
3495 {
3496 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3497 struct hns_roce_v2_cqe *cqe, *dest;
3498 u32 prod_index;
3499 int nfreed = 0;
3500 int wqe_index;
3501 u8 owner_bit;
3502
3503 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
3504 ++prod_index) {
3505 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
3506 break;
3507 }
3508
3509 /*
3510 * Now sweep backwards through the CQ, removing entries that match
3511 * our QP by overwriting them with the entries that follow them.
3512 */
3513 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
3514 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
3515 if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
3516 if (srq && hr_reg_read(cqe, CQE_S_R)) {
3517 wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
3518 hns_roce_free_srq_wqe(srq, wqe_index);
3519 }
3520 ++nfreed;
3521 } else if (nfreed) {
3522 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3523 hr_cq->ib_cq.cqe);
3524 owner_bit = hr_reg_read(dest, CQE_OWNER);
3525 memcpy(dest, cqe, hr_cq->cqe_size);
3526 hr_reg_write(dest, CQE_OWNER, owner_bit);
3527 }
3528 }
3529
3530 if (nfreed) {
3531 hr_cq->cons_index += nfreed;
3532 update_cq_db(hr_dev, hr_cq);
3533 }
3534 }
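/*
 * Illustrative note (added, not driver code): suppose the CQ holds, from
 * cons_index upward, the entries [A(qp1) B(qp2) C(qp1) D(qp2)] and qp1 is
 * being cleaned. Scanning backwards: D does not match; C matches and is
 * dropped (nfreed = 1); B is copied one slot up, on top of C; A matches
 * (nfreed = 2). cons_index then advances by 2, so the consumer sees
 * exactly [B D], still in order. The owner bit of each destination slot
 * is preserved during the copy so the ownership scheme stays intact.
 */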
3535
3536 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3537 struct hns_roce_srq *srq)
3538 {
3539 spin_lock_irq(&hr_cq->lock);
3540 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3541 spin_unlock_irq(&hr_cq->lock);
3542 }
3543
3544 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3545 struct hns_roce_cq *hr_cq, void *mb_buf,
3546 u64 *mtts, dma_addr_t dma_handle)
3547 {
3548 struct hns_roce_v2_cq_context *cq_context;
3549
3550 cq_context = mb_buf;
3551 memset(cq_context, 0, sizeof(*cq_context));
3552
3553 hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
3554 hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
3555 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
3556 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
3557 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
3558
3559 if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
3560 hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);
3561
3562 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3563 hr_reg_enable(cq_context, CQC_STASH);
3564
3565 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
3566 to_hr_hw_page_addr(mtts[0]));
3567 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
3568 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3569 hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
3570 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3571 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
3572 to_hr_hw_page_addr(mtts[1]));
3573 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
3574 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3575 hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
3576 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3577 hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
3578 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3579 hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
3580 hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
3581 hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
3582 hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
3583 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
3584 ((u32)hr_cq->db.dma) >> 1);
3585 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
3586 hr_cq->db.dma >> 32);
3587 hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
3588 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3589 hr_reg_write(cq_context, CQC_CQ_PERIOD,
3590 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3591 }
3592
3593 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3594 enum ib_cq_notify_flags flags)
3595 {
3596 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3597 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3598 struct hns_roce_v2_db cq_db = {};
3599 u32 notify_flag;
3600
3601 /*
3602 * flags = 0: notify_flag = next
3603 * flags = 1: notify_flag = solicited
3604 */
3605 notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3606 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3607
3608 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3609 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
3610 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3611 hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
3612 hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
3613
3614 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3615
3616 return 0;
3617 }
3618
3619 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3620 int num_entries, struct ib_wc *wc)
3621 {
3622 unsigned int left;
3623 int npolled = 0;
3624
3625 left = wq->head - wq->tail;
3626 if (left == 0)
3627 return 0;
3628
3629 left = min_t(unsigned int, (unsigned int)num_entries, left);
3630 while (npolled < left) {
3631 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3632 wc->status = IB_WC_WR_FLUSH_ERR;
3633 wc->vendor_err = 0;
3634 wc->qp = &hr_qp->ibqp;
3635
3636 wq->tail++;
3637 wc++;
3638 npolled++;
3639 }
3640
3641 return npolled;
3642 }
3643
3644 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3645 struct ib_wc *wc)
3646 {
3647 struct hns_roce_qp *hr_qp;
3648 int npolled = 0;
3649
3650 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3651 npolled += sw_comp(hr_qp, &hr_qp->sq,
3652 num_entries - npolled, wc + npolled);
3653 if (npolled >= num_entries)
3654 goto out;
3655 }
3656
3657 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3658 npolled += sw_comp(hr_qp, &hr_qp->rq,
3659 num_entries - npolled, wc + npolled);
3660 if (npolled >= num_entries)
3661 goto out;
3662 }
3663
3664 out:
3665 return npolled;
3666 }
3667
3668 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3669 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3670 struct ib_wc *wc)
3671 {
3672 static const struct {
3673 u32 cqe_status;
3674 enum ib_wc_status wc_status;
3675 } map[] = {
3676 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3677 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3678 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3679 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3680 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3681 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3682 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3683 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3684 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3685 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3686 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3687 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3688 IB_WC_RETRY_EXC_ERR },
3689 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3690 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3691 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3692 };
3693
3694 u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
3695 int i;
3696
3697 wc->status = IB_WC_GENERAL_ERR;
3698 for (i = 0; i < ARRAY_SIZE(map); i++)
3699 if (cqe_status == map[i].cqe_status) {
3700 wc->status = map[i].wc_status;
3701 break;
3702 }
3703
3704 if (likely(wc->status == IB_WC_SUCCESS ||
3705 wc->status == IB_WC_WR_FLUSH_ERR))
3706 return;
3707
3708 ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
3709 cqe_status);
3710 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3711 cq->cqe_size, false);
3712 wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
3713
3714 /*
3715 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
3716 * the standard protocol; the driver must ignore it and need not move
3717 * the QP to an error state.
3718 */
3719 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3720 return;
3721
3722 flush_cqe(hr_dev, qp);
3723 }
3724
3725 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
3726 struct hns_roce_qp **cur_qp)
3727 {
3728 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3729 struct hns_roce_qp *hr_qp = *cur_qp;
3730 u32 qpn;
3731
3732 qpn = hr_reg_read(cqe, CQE_LCL_QPN);
3733
3734 if (!hr_qp || qpn != hr_qp->qpn) {
3735 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3736 if (unlikely(!hr_qp)) {
3737 ibdev_err(&hr_dev->ib_dev,
3738 "CQ %06lx with entry for unknown QPN %06x\n",
3739 hr_cq->cqn, qpn);
3740 return -EINVAL;
3741 }
3742 *cur_qp = hr_qp;
3743 }
3744
3745 return 0;
3746 }
3747
3748 /*
3749 * mapped-value = 1 + real-value
3750 * The real values of the ib wc opcodes start from 0. In order to
3751 * distinguish between initialized and uninitialized map values, we add 1
3752 * to the actual value when defining the mapping, so that validity can be
3753 * identified by checking whether the mapped value is greater than 0.
3754 */
3755 #define HR_WC_OP_MAP(hr_key, ib_key) \
3756 [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
3757
3758 static const u32 wc_send_op_map[] = {
3759 HR_WC_OP_MAP(SEND, SEND),
3760 HR_WC_OP_MAP(SEND_WITH_INV, SEND),
3761 HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
3762 HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
3763 HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
3764 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
3765 HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
3766 HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
3767 HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
3768 HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
3769 HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
3770 HR_WC_OP_MAP(BIND_MW, REG_MR),
3771 };
3772
3773 static int to_ib_wc_send_op(u32 hr_opcode)
3774 {
3775 if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
3776 return -EINVAL;
3777
3778 return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
3779 -EINVAL;
3780 }
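/*
 * Illustrative note (added, not driver code): with the +1 convention,
 * wc_send_op_map[HNS_ROCE_V2_WQE_OP_SEND] holds 1 + IB_WC_SEND, while any
 * hns opcode without an HR_WC_OP_MAP() entry holds the implicit 0 of a
 * static array. to_ib_wc_send_op() therefore rejects such a hole with
 * -EINVAL and otherwise undoes the offset, mapping e.g.
 * HNS_ROCE_V2_WQE_OP_RDMA_READ back to IB_WC_RDMA_READ.
 */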
3781
3782 static const u32 wc_recv_op_map[] = {
3783 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
3784 HR_WC_OP_MAP(SEND, RECV),
3785 HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
3786 HR_WC_OP_MAP(SEND_WITH_INV, RECV),
3787 };
3788
3789 static int to_ib_wc_recv_op(u32 hr_opcode)
3790 {
3791 if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
3792 return -EINVAL;
3793
3794 return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
3795 -EINVAL;
3796 }
3797
3798 static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3799 {
3800 u32 hr_opcode;
3801 int ib_opcode;
3802
3803 wc->wc_flags = 0;
3804
3805 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3806 switch (hr_opcode) {
3807 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3808 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3809 break;
3810 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3811 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3812 wc->wc_flags |= IB_WC_WITH_IMM;
3813 break;
3814 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3815 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3816 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3817 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3818 wc->byte_len = 8;
3819 break;
3820 default:
3821 break;
3822 }
3823
3824 ib_opcode = to_ib_wc_send_op(hr_opcode);
3825 if (ib_opcode < 0)
3826 wc->status = IB_WC_GENERAL_ERR;
3827 else
3828 wc->opcode = ib_opcode;
3829 }
3830
3831 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3832 {
3833 u32 hr_opcode;
3834 int ib_opcode;
3835
3836 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3837
3838 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3839 switch (hr_opcode) {
3840 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3841 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3842 wc->wc_flags = IB_WC_WITH_IMM;
3843 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
3844 break;
3845 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3846 wc->wc_flags = IB_WC_WITH_INVALIDATE;
3847 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3848 break;
3849 default:
3850 wc->wc_flags = 0;
3851 }
3852
3853 ib_opcode = to_ib_wc_recv_op(hr_opcode);
3854 if (ib_opcode < 0)
3855 wc->status = IB_WC_GENERAL_ERR;
3856 else
3857 wc->opcode = ib_opcode;
3858
3859 wc->sl = hr_reg_read(cqe, CQE_SL);
3860 wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
3861 wc->slid = 0;
3862 wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
3863 wc->port_num = hr_reg_read(cqe, CQE_PORTN);
3864 wc->pkey_index = 0;
3865
3866 if (hr_reg_read(cqe, CQE_VID_VLD)) {
3867 wc->vlan_id = hr_reg_read(cqe, CQE_VID);
3868 wc->wc_flags |= IB_WC_WITH_VLAN;
3869 } else {
3870 wc->vlan_id = 0xffff;
3871 }
3872
3873 wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
3874
3875 return 0;
3876 }
3877
3878 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3879 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3880 {
3881 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3882 struct hns_roce_qp *qp = *cur_qp;
3883 struct hns_roce_srq *srq = NULL;
3884 struct hns_roce_v2_cqe *cqe;
3885 struct hns_roce_wq *wq;
3886 int is_send;
3887 u16 wqe_idx;
3888 int ret;
3889
3890 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3891 if (!cqe)
3892 return -EAGAIN;
3893
3894 ++hr_cq->cons_index;
3895 /* Ensure the CQE fields are read only after the owner bit has been checked */
3896 rmb();
3897
3898 ret = get_cur_qp(hr_cq, cqe, &qp);
3899 if (ret)
3900 return ret;
3901
3902 wc->qp = &qp->ibqp;
3903 wc->vendor_err = 0;
3904
3905 wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
3906
3907 is_send = !hr_reg_read(cqe, CQE_S_R);
3908 if (is_send) {
3909 wq = &qp->sq;
3910
3911 /* If sq_signal_bits is set, the tail pointer is advanced to
3912 * the WQE corresponding to the current CQE.
3913 */
3914 if (qp->sq_signal_bits)
3915 wq->tail += (wqe_idx - (u16)wq->tail) &
3916 (wq->wqe_cnt - 1);
3917
3918 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3919 ++wq->tail;
3920
3921 fill_send_wc(wc, cqe);
3922 } else {
3923 if (qp->ibqp.srq) {
3924 srq = to_hr_srq(qp->ibqp.srq);
3925 wc->wr_id = srq->wrid[wqe_idx];
3926 hns_roce_free_srq_wqe(srq, wqe_idx);
3927 } else {
3928 wq = &qp->rq;
3929 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3930 ++wq->tail;
3931 }
3932
3933 ret = fill_recv_wc(wc, cqe);
3934 }
3935
3936 get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
3937 if (unlikely(wc->status != IB_WC_SUCCESS))
3938 return 0;
3939
3940 return ret;
3941 }
3942
3943 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3944 struct ib_wc *wc)
3945 {
3946 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3947 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3948 struct hns_roce_qp *cur_qp = NULL;
3949 unsigned long flags;
3950 int npolled;
3951
3952 spin_lock_irqsave(&hr_cq->lock, flags);
3953
3954 /*
3955 * When the device starts to reset, the state is RST_DOWN. At this time,
3956 * there may still be valid CQEs in the hardware that have not been
3957 * polled, so it is not allowed to switch to software mode immediately.
3958 * Once the state changes to UNINIT, no CQEs remain in the hardware, and
3959 * the driver can then switch to software mode.
3960 */
3961 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3962 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3963 goto out;
3964 }
3965
3966 for (npolled = 0; npolled < num_entries; ++npolled) {
3967 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3968 break;
3969 }
3970
3971 if (npolled)
3972 update_cq_db(hr_dev, hr_cq);
3973
3974 out:
3975 spin_unlock_irqrestore(&hr_cq->lock, flags);
3976
3977 return npolled;
3978 }
3979
3980 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3981 u32 step_idx, u8 *mbox_cmd)
3982 {
3983 u8 cmd;
3984
3985 switch (type) {
3986 case HEM_TYPE_QPC:
3987 cmd = HNS_ROCE_CMD_WRITE_QPC_BT0;
3988 break;
3989 case HEM_TYPE_MTPT:
3990 cmd = HNS_ROCE_CMD_WRITE_MPT_BT0;
3991 break;
3992 case HEM_TYPE_CQC:
3993 cmd = HNS_ROCE_CMD_WRITE_CQC_BT0;
3994 break;
3995 case HEM_TYPE_SRQC:
3996 cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3997 break;
3998 case HEM_TYPE_SCCC:
3999 cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0;
4000 break;
4001 case HEM_TYPE_QPC_TIMER:
4002 cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
4003 break;
4004 case HEM_TYPE_CQC_TIMER:
4005 cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
4006 break;
4007 default:
4008 dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
4009 return -EINVAL;
4010 }
4011
4012 *mbox_cmd = cmd + step_idx;
4013
4014 return 0;
4015 }
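/*
 * Illustrative note (added, not driver code): the "cmd + step_idx"
 * arithmetic assumes the mailbox opcodes for the BT levels of one table
 * type are consecutive, i.e. that WRITE_xxx_BT0 + 1 addresses BT1 and
 * WRITE_xxx_BT0 + 2 addresses BT2. For HEM_TYPE_QPC with step_idx = 1,
 * the resulting opcode would be HNS_ROCE_CMD_WRITE_QPC_BT0 + 1.
 */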
4016
4017 static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
4018 dma_addr_t base_addr)
4019 {
4020 struct hns_roce_cmq_desc desc;
4021 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
4022 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
4023 u64 addr = to_hr_hw_page_addr(base_addr);
4024
4025 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
4026
4027 hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
4028 hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
4029 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
4030
4031 return hns_roce_cmq_send(hr_dev, &desc, 1);
4032 }
4033
4034 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
4035 dma_addr_t base_addr, u32 hem_type, u32 step_idx)
4036 {
4037 int ret;
4038 u8 cmd;
4039
4040 if (unlikely(hem_type == HEM_TYPE_GMV))
4041 return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
4042
4043 if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
4044 return 0;
4045
4046 ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd);
4047 if (ret < 0)
4048 return ret;
4049
4050 return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj);
4051 }
4052
4053 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
4054 struct hns_roce_hem_table *table, int obj,
4055 u32 step_idx)
4056 {
4057 struct hns_roce_hem_iter iter;
4058 struct hns_roce_hem_mhop mhop;
4059 struct hns_roce_hem *hem;
4060 unsigned long mhop_obj = obj;
4061 int i, j, k;
4062 int ret = 0;
4063 u64 hem_idx = 0;
4064 u64 l1_idx = 0;
4065 u64 bt_ba = 0;
4066 u32 chunk_ba_num;
4067 u32 hop_num;
4068
4069 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
4070 return 0;
4071
4072 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
4073 i = mhop.l0_idx;
4074 j = mhop.l1_idx;
4075 k = mhop.l2_idx;
4076 hop_num = mhop.hop_num;
4077 chunk_ba_num = mhop.bt_chunk_size / 8;
4078
4079 if (hop_num == 2) {
4080 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
4081 k;
4082 l1_idx = i * chunk_ba_num + j;
4083 } else if (hop_num == 1) {
4084 hem_idx = i * chunk_ba_num + j;
4085 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
4086 hem_idx = i;
4087 }
4088
4089 if (table->type == HEM_TYPE_SCCC)
4090 obj = mhop.l0_idx;
4091
4092 if (check_whether_last_step(hop_num, step_idx)) {
4093 hem = table->hem[hem_idx];
4094 for (hns_roce_hem_first(hem, &iter);
4095 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
4096 bt_ba = hns_roce_hem_addr(&iter);
4097 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
4098 step_idx);
4099 }
4100 } else {
4101 if (step_idx == 0)
4102 bt_ba = table->bt_l0_dma_addr[i];
4103 else if (step_idx == 1 && hop_num == 2)
4104 bt_ba = table->bt_l1_dma_addr[l1_idx];
4105
4106 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
4107 }
4108
4109 return ret;
4110 }
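/*
 * Illustrative note (added, not driver code): hem_idx treats
 * (l0_idx, l1_idx, l2_idx) as the digits of a base-chunk_ba_num number.
 * Assuming a hypothetical 4KB BT chunk, chunk_ba_num = 4096 / 8 = 512
 * base addresses per chunk, so with hop_num = 2 and (i, j, k) = (1, 2, 3):
 *
 *   hem_idx = 1 * 512 * 512 + 2 * 512 + 3 = 263171
 *   l1_idx  = 1 * 512 + 2                 = 514
 */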
4111
4112 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
4113 struct hns_roce_hem_table *table,
4114 int tag, u32 step_idx)
4115 {
4116 struct hns_roce_cmd_mailbox *mailbox;
4117 struct device *dev = hr_dev->dev;
4118 u8 cmd = 0xff;
4119 int ret;
4120
4121 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
4122 return 0;
4123
4124 switch (table->type) {
4125 case HEM_TYPE_QPC:
4126 cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0;
4127 break;
4128 case HEM_TYPE_MTPT:
4129 cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0;
4130 break;
4131 case HEM_TYPE_CQC:
4132 cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0;
4133 break;
4134 case HEM_TYPE_SRQC:
4135 cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
4136 break;
4137 case HEM_TYPE_SCCC:
4138 case HEM_TYPE_QPC_TIMER:
4139 case HEM_TYPE_CQC_TIMER:
4140 case HEM_TYPE_GMV:
4141 return 0;
4142 default:
4143 dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
4144 table->type);
4145 return 0;
4146 }
4147
4148 cmd += step_idx;
4149
4150 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4151 if (IS_ERR(mailbox))
4152 return PTR_ERR(mailbox);
4153
4154 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag);
4155
4156 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4157 return ret;
4158 }
4159
4160 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
4161 struct hns_roce_v2_qp_context *context,
4162 struct hns_roce_v2_qp_context *qpc_mask,
4163 struct hns_roce_qp *hr_qp)
4164 {
4165 struct hns_roce_cmd_mailbox *mailbox;
4166 int qpc_size;
4167 int ret;
4168
4169 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4170 if (IS_ERR(mailbox))
4171 return PTR_ERR(mailbox);
4172
4173 /* The QPC size of HIP08 is only 256B, half that of HIP09 */
4174 qpc_size = hr_dev->caps.qpc_sz;
4175 memcpy(mailbox->buf, context, qpc_size);
4176 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
4177
4178 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
4179 HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn);
4180
4181 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4182
4183 return ret;
4184 }
4185
4186 static void set_access_flags(struct hns_roce_qp *hr_qp,
4187 struct hns_roce_v2_qp_context *context,
4188 struct hns_roce_v2_qp_context *qpc_mask,
4189 const struct ib_qp_attr *attr, int attr_mask)
4190 {
4191 u8 dest_rd_atomic;
4192 u32 access_flags;
4193
4194 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
4195 attr->max_dest_rd_atomic : hr_qp->resp_depth;
4196
4197 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
4198 attr->qp_access_flags : hr_qp->atomic_rd_en;
4199
4200 if (!dest_rd_atomic)
4201 access_flags &= IB_ACCESS_REMOTE_WRITE;
4202
4203 hr_reg_write_bool(context, QPC_RRE,
4204 access_flags & IB_ACCESS_REMOTE_READ);
4205 hr_reg_clear(qpc_mask, QPC_RRE);
4206
4207 hr_reg_write_bool(context, QPC_RWE,
4208 access_flags & IB_ACCESS_REMOTE_WRITE);
4209 hr_reg_clear(qpc_mask, QPC_RWE);
4210
4211 hr_reg_write_bool(context, QPC_ATE,
4212 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4213 hr_reg_clear(qpc_mask, QPC_ATE);
4214 hr_reg_write_bool(context, QPC_EXT_ATE,
4215 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4216 hr_reg_clear(qpc_mask, QPC_EXT_ATE);
4217 }
4218
4219 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
4220 struct hns_roce_v2_qp_context *context,
4221 struct hns_roce_v2_qp_context *qpc_mask)
4222 {
4223 hr_reg_write(context, QPC_SGE_SHIFT,
4224 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
4225 hr_qp->sge.sge_shift));
4226
4227 hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));
4228
4229 hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
4230 }
4231
4232 static inline int get_cqn(struct ib_cq *ib_cq)
4233 {
4234 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
4235 }
4236
4237 static inline int get_pdn(struct ib_pd *ib_pd)
4238 {
4239 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
4240 }
4241
4242 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
4243 const struct ib_qp_attr *attr,
4244 struct hns_roce_v2_qp_context *context,
4245 struct hns_roce_v2_qp_context *qpc_mask)
4246 {
4247 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4248 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4249
4250 /*
4251 * In the v2 engine, software passes a context and a context mask to
4252 * hardware when modifying a qp. For each field that software needs to
4253 * modify, all bits of that field in the context mask must be cleared
4254 * to 0 at the same time; otherwise they are left set to 0x1.
4255 */
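/*
 * Illustrative note (added, not driver code): to change only the PD
 * field, a caller would write the new value into the context and clear
 * the matching mask bits:
 *
 *   hr_reg_write(context, QPC_PD, pdn);
 *   hr_reg_clear(qpc_mask, QPC_PD);
 *
 * In this RESET-to-INIT path the caller has already memset the whole
 * mask to 0 (see hns_roce_v2_set_abs_fields()), so every field is
 * writable and no per-field hr_reg_clear() on the mask is needed here.
 */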
4256 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4257
4258 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4259
4260 hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
4261
4262 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
4263
4264 /* Set to 0xFFF when there is no VLAN */
4265 hr_reg_write(context, QPC_VLAN_ID, 0xfff);
4266
4267 if (ibqp->qp_type == IB_QPT_XRC_TGT) {
4268 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
4269
4270 hr_reg_enable(context, QPC_XRC_QP_TYPE);
4271 }
4272
4273 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4274 hr_reg_enable(context, QPC_RQ_RECORD_EN);
4275
4276 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
4277 hr_reg_enable(context, QPC_OWNER_MODE);
4278
4279 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
4280 lower_32_bits(hr_qp->rdb.dma) >> 1);
4281 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
4282 upper_32_bits(hr_qp->rdb.dma));
4283
4284 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4285
4286 if (ibqp->srq) {
4287 hr_reg_enable(context, QPC_SRQ_EN);
4288 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4289 }
4290
4291 hr_reg_enable(context, QPC_FRE);
4292
4293 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4294
4295 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
4296 return;
4297
4298 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
4299 hr_reg_enable(&context->ext, QPCEX_STASH);
4300 }
4301
4302 static void modify_qp_init_to_init(struct ib_qp *ibqp,
4303 const struct ib_qp_attr *attr,
4304 struct hns_roce_v2_qp_context *context,
4305 struct hns_roce_v2_qp_context *qpc_mask)
4306 {
4307 /*
4308 * In the v2 engine, software passes a context and a context mask to
4309 * hardware when modifying a qp. For each field that software needs to
4310 * modify, all bits of that field in the context mask must be cleared
4311 * to 0 at the same time; otherwise they are left set to 0x1.
4312 */
4313 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4314 hr_reg_clear(qpc_mask, QPC_TST);
4315
4316 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4317 hr_reg_clear(qpc_mask, QPC_PD);
4318
4319 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4320 hr_reg_clear(qpc_mask, QPC_RX_CQN);
4321
4322 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4323 hr_reg_clear(qpc_mask, QPC_TX_CQN);
4324
4325 if (ibqp->srq) {
4326 hr_reg_enable(context, QPC_SRQ_EN);
4327 hr_reg_clear(qpc_mask, QPC_SRQ_EN);
4328 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4329 hr_reg_clear(qpc_mask, QPC_SRQN);
4330 }
4331 }
4332
4333 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
4334 struct hns_roce_qp *hr_qp,
4335 struct hns_roce_v2_qp_context *context,
4336 struct hns_roce_v2_qp_context *qpc_mask)
4337 {
4338 u64 mtts[MTT_MIN_COUNT] = { 0 };
4339 u64 wqe_sge_ba;
4340 int count;
4341
4342 /* Search qp buf's mtts */
4343 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
4344 MTT_MIN_COUNT, &wqe_sge_ba);
4345 if (hr_qp->rq.wqe_cnt && count < 1) {
4346 ibdev_err(&hr_dev->ib_dev,
4347 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
4348 return -EINVAL;
4349 }
4350
4351 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4352 qpc_mask->wqe_sge_ba = 0;
4353
4354 /*
4355 * In the v2 engine, software passes a context and a context mask to
4356 * hardware when modifying a qp. For each field that software needs to
4357 * modify, all bits of that field in the context mask must be cleared
4358 * to 0 at the same time; otherwise they are left set to 0x1.
4359 */
4360 hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
4361 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);
4362
4363 hr_reg_write(context, QPC_SQ_HOP_NUM,
4364 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
4365 hr_qp->sq.wqe_cnt));
4366 hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);
4367
4368 hr_reg_write(context, QPC_SGE_HOP_NUM,
4369 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4370 hr_qp->sge.sge_cnt));
4371 hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);
4372
4373 hr_reg_write(context, QPC_RQ_HOP_NUM,
4374 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4375 hr_qp->rq.wqe_cnt));
4376
4377 hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
4378
4379 hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
4380 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4381 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
4382
4383 hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
4384 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4385 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
4386
4387 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4388 qpc_mask->rq_cur_blk_addr = 0;
4389
4390 hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
4391 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4392 hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
4393
4394 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4395 qpc_mask->rq_nxt_blk_addr = 0;
4396
4397 hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
4398 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4399 hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
4400
4401 return 0;
4402 }
4403
4404 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4405 struct hns_roce_qp *hr_qp,
4406 struct hns_roce_v2_qp_context *context,
4407 struct hns_roce_v2_qp_context *qpc_mask)
4408 {
4409 struct ib_device *ibdev = &hr_dev->ib_dev;
4410 u64 sge_cur_blk = 0;
4411 u64 sq_cur_blk = 0;
4412 int count;
4413
4414 /* Search qp buf's mtts */
4415 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4416 if (count < 1) {
4417 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4418 hr_qp->qpn);
4419 return -EINVAL;
4420 }
4421 if (hr_qp->sge.sge_cnt > 0) {
4422 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4423 hr_qp->sge.offset,
4424 &sge_cur_blk, 1, NULL);
4425 if (count < 1) {
4426 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4427 hr_qp->qpn);
4428 return -EINVAL;
4429 }
4430 }
4431
4432 /*
4433 * In the v2 engine, software passes a context and a context mask to
4434 * hardware when modifying a qp. For each field that software needs to
4435 * modify, all bits of that field in the context mask must be cleared
4436 * to 0 at the same time; otherwise they are left set to 0x1.
4437 */
4438 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
4439 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4440 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
4441 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4442 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
4443 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);
4444
4445 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
4446 lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4447 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
4448 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4449 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
4450 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);
4451
4452 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
4453 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4454 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
4455 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4456 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
4457 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);
4458
4459 return 0;
4460 }
4461
4462 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4463 const struct ib_qp_attr *attr)
4464 {
4465 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4466 return IB_MTU_4096;
4467
4468 return attr->path_mtu;
4469 }
4470
4471 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4472 const struct ib_qp_attr *attr, int attr_mask,
4473 struct hns_roce_v2_qp_context *context,
4474 struct hns_roce_v2_qp_context *qpc_mask,
4475 struct ib_udata *udata)
4476 {
4477 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
4478 struct hns_roce_ucontext, ibucontext);
4479 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4480 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4481 struct ib_device *ibdev = &hr_dev->ib_dev;
4482 dma_addr_t trrl_ba;
4483 dma_addr_t irrl_ba;
4484 enum ib_mtu ib_mtu;
4485 const u8 *smac;
4486 u8 lp_pktn_ini;
4487 u64 *mtts;
4488 u8 *dmac;
4489 u32 port;
4490 int mtu;
4491 int ret;
4492
4493 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4494 if (ret) {
4495 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4496 return ret;
4497 }
4498
4499 /* Search IRRL's mtts */
4500 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4501 hr_qp->qpn, &irrl_ba);
4502 if (!mtts) {
4503 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4504 return -EINVAL;
4505 }
4506
4507 /* Search TRRL's mtts */
4508 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4509 hr_qp->qpn, &trrl_ba);
4510 if (!mtts) {
4511 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4512 return -EINVAL;
4513 }
4514
4515 if (attr_mask & IB_QP_ALT_PATH) {
4516 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4517 attr_mask);
4518 return -EINVAL;
4519 }
4520
4521 hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
4522 hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
4523 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4524 qpc_mask->trrl_ba = 0;
4525 hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
4526 hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
4527
4528 context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4529 qpc_mask->irrl_ba = 0;
4530 hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
4531 hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
4532
4533 hr_reg_enable(context, QPC_RMT_E2E);
4534 hr_reg_clear(qpc_mask, QPC_RMT_E2E);
4535
4536 hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
4537 hr_reg_clear(qpc_mask, QPC_SIG_TYPE);
4538
4539 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4540
4541 smac = (const u8 *)hr_dev->dev_addr[port];
4542 dmac = (u8 *)attr->ah_attr.roce.dmac;
4543 /* Loopback is required when dmac equals smac or when loop_idc is 1 */
4544 if (ether_addr_equal_unaligned(dmac, smac) ||
4545 hr_dev->loop_idc == 0x1) {
4546 hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
4547 hr_reg_clear(qpc_mask, QPC_LBI);
4548 }
4549
4550 if (attr_mask & IB_QP_DEST_QPN) {
4551 hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
4552 hr_reg_clear(qpc_mask, QPC_DQPN);
4553 }
4554
4555 memcpy(&context->dmac, dmac, sizeof(u32));
4556 hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
4557 qpc_mask->dmac = 0;
4558 hr_reg_clear(qpc_mask, QPC_DMAC_H);
4559
4560 ib_mtu = get_mtu(ibqp, attr);
4561 hr_qp->path_mtu = ib_mtu;
4562
4563 mtu = ib_mtu_enum_to_int(ib_mtu);
4564 if (WARN_ON(mtu <= 0))
4565 return -EINVAL;
4566 #define MIN_LP_MSG_LEN 1024
4567 /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
4568 lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
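/*
 * Illustrative note (added, not driver code): with mtu = 256 this gives
 * lp_pktn_ini = ilog2(1024 / 256) = 2, i.e. one loopback message of
 * 256 * 2^2 = 1024 bytes; with mtu = 4096, ilog2(4096 / 4096) = 0 and
 * the message stays at one MTU.
 */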
4569
4570 if (attr_mask & IB_QP_PATH_MTU) {
4571 hr_reg_write(context, QPC_MTU, ib_mtu);
4572 hr_reg_clear(qpc_mask, QPC_MTU);
4573 }
4574
4575 hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
4576 hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
4577
4578 /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4579 hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
4580 hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
4581
4582 hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
4583 hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
4584 hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);
4585
4586 context->rq_rnr_timer = 0;
4587 qpc_mask->rq_rnr_timer = 0;
4588
4589 hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
4590 hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
4591
4592 /* The ROCEE sends 2^lp_sgen_ini segments at a time */
4593 hr_reg_write(context, QPC_LP_SGEN_INI, 3);
4594 hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
4595
4596 if (udata && ibqp->qp_type == IB_QPT_RC &&
4597 (uctx->config & HNS_ROCE_RQ_INLINE_FLAGS)) {
4598 hr_reg_write_bool(context, QPC_RQIE,
4599 hr_dev->caps.flags &
4600 HNS_ROCE_CAP_FLAG_RQ_INLINE);
4601 hr_reg_clear(qpc_mask, QPC_RQIE);
4602 }
4603
4604 if (udata &&
4605 (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_XRC_TGT) &&
4606 (uctx->config & HNS_ROCE_CQE_INLINE_FLAGS)) {
4607 hr_reg_write_bool(context, QPC_CQEIE,
4608 hr_dev->caps.flags &
4609 HNS_ROCE_CAP_FLAG_CQE_INLINE);
4610 hr_reg_clear(qpc_mask, QPC_CQEIE);
4611
4612 hr_reg_write(context, QPC_CQEIS, 0);
4613 hr_reg_clear(qpc_mask, QPC_CQEIS);
4614 }
4615
4616 return 0;
4617 }
4618
4619 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4620 const struct ib_qp_attr *attr, int attr_mask,
4621 struct hns_roce_v2_qp_context *context,
4622 struct hns_roce_v2_qp_context *qpc_mask)
4623 {
4624 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4625 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4626 struct ib_device *ibdev = &hr_dev->ib_dev;
4627 int ret;
4628
4629 /* Alternate path and path migration are not supported */
4630 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4631 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
4632 return -EINVAL;
4633 }
4634
4635 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4636 if (ret) {
4637 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
4638 return ret;
4639 }
4640
4641 /*
4642 * Set some fields in context to zero. Because the default values of
4643 * all fields in context are zero, we need not set them to 0 again;
4644 * we only need to clear the relevant fields of the context mask.
4645 */
4646 hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);
4647
4648 hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);
4649
4650 hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
4651 hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
4652 hr_reg_clear(qpc_mask, QPC_IRRL_PSN);
4653
4654 hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);
4655
4656 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);
4657
4658 hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);
4659
4660 hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
4661
4662 hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
4663
4664 return 0;
4665 }
4666
4667 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4668 u32 *dip_idx)
4669 {
4670 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4671 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4672 u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
4673 u32 *head = &hr_dev->qp_table.idx_table.head;
4674 u32 *tail = &hr_dev->qp_table.idx_table.tail;
4675 struct hns_roce_dip *hr_dip;
4676 unsigned long flags;
4677 int ret = 0;
4678
4679 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
4680
4681 spare_idx[*tail] = ibqp->qp_num;
4682 *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
4683
4684 list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
4685 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
4686 *dip_idx = hr_dip->dip_idx;
4687 goto out;
4688 }
4689 }
4690
4691 /* If no dgid is found, a new dip and a mapping between dgid and
4692 * dip_idx will be created.
4693 */
4694 hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
4695 if (!hr_dip) {
4696 ret = -ENOMEM;
4697 goto out;
4698 }
4699
4700 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4701 hr_dip->dip_idx = *dip_idx = spare_idx[*head];
4702 *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
4703 list_add_tail(&hr_dip->node, &hr_dev->dip_list);
4704
4705 out:
4706 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
4707 return ret;
4708 }
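/*
 * Illustrative note (added, not driver code): spare_idx acts as a
 * circular queue of recyclable DIP context indexes. Every caller first
 * enqueues its own QP number at *tail; a lookup miss then dequeues an
 * index from *head for the new dgid mapping. Both pointers wrap around
 * at caps.num_qps, so the queue cannot run dry as long as each call
 * donates one index before it may consume one.
 */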
4709
4710 enum {
4711 CONG_DCQCN,
4712 CONG_WINDOW,
4713 };
4714
4715 enum {
4716 UNSUPPORT_CONG_LEVEL,
4717 SUPPORT_CONG_LEVEL,
4718 };
4719
4720 enum {
4721 CONG_LDCP,
4722 CONG_HC3,
4723 };
4724
4725 enum {
4726 DIP_INVALID,
4727 DIP_VALID,
4728 };
4729
4730 enum {
4731 WND_LIMIT,
4732 WND_UNLIMIT,
4733 };
4734
4735 static int check_cong_type(struct ib_qp *ibqp,
4736 struct hns_roce_congestion_algorithm *cong_alg)
4737 {
4738 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4739 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4740
4741 if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI)
4742 hr_qp->cong_type = CONG_TYPE_DCQCN;
4743 else
4744 hr_qp->cong_type = hr_dev->caps.cong_type;
4745
4746 /* different congestion types match different configurations */
4747 switch (hr_qp->cong_type) {
4748 case CONG_TYPE_DCQCN:
4749 cong_alg->alg_sel = CONG_DCQCN;
4750 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4751 cong_alg->dip_vld = DIP_INVALID;
4752 cong_alg->wnd_mode_sel = WND_LIMIT;
4753 break;
4754 case CONG_TYPE_LDCP:
4755 cong_alg->alg_sel = CONG_WINDOW;
4756 cong_alg->alg_sub_sel = CONG_LDCP;
4757 cong_alg->dip_vld = DIP_INVALID;
4758 cong_alg->wnd_mode_sel = WND_UNLIMIT;
4759 break;
4760 case CONG_TYPE_HC3:
4761 cong_alg->alg_sel = CONG_WINDOW;
4762 cong_alg->alg_sub_sel = CONG_HC3;
4763 cong_alg->dip_vld = DIP_INVALID;
4764 cong_alg->wnd_mode_sel = WND_LIMIT;
4765 break;
4766 case CONG_TYPE_DIP:
4767 cong_alg->alg_sel = CONG_DCQCN;
4768 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4769 cong_alg->dip_vld = DIP_VALID;
4770 cong_alg->wnd_mode_sel = WND_LIMIT;
4771 break;
4772 default:
4773 ibdev_warn(&hr_dev->ib_dev,
4774 "invalid type(%u) for congestion selection.\n",
4775 hr_qp->cong_type);
4776 hr_qp->cong_type = CONG_TYPE_DCQCN;
4777 cong_alg->alg_sel = CONG_DCQCN;
4778 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4779 cong_alg->dip_vld = DIP_INVALID;
4780 cong_alg->wnd_mode_sel = WND_LIMIT;
4781 break;
4782 }
4783
4784 return 0;
4785 }
4786
4787 static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4788 struct hns_roce_v2_qp_context *context,
4789 struct hns_roce_v2_qp_context *qpc_mask)
4790 {
4791 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4792 struct hns_roce_congestion_algorithm cong_field;
4793 struct ib_device *ibdev = ibqp->device;
4794 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
4795 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4796 u32 dip_idx = 0;
4797 int ret;
4798
4799 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
4800 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
4801 return 0;
4802
4803 ret = check_cong_type(ibqp, &cong_field);
4804 if (ret)
4805 return ret;
4806
4807 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
4808 hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
4809 hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
4810 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
4811 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
4812 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
4813 cong_field.alg_sub_sel);
4814 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
4815 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
4816 hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
4817 hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
4818 cong_field.wnd_mode_sel);
4819 hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
4820
4821 /* if dip is disabled, there is no need to set dip idx */
4822 if (cong_field.dip_vld == 0)
4823 return 0;
4824
4825 ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
4826 if (ret) {
4827 ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
4828 return ret;
4829 }
4830
4831 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
4832 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
4833
4834 return 0;
4835 }
4836
4837 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4838 const struct ib_qp_attr *attr,
4839 int attr_mask,
4840 struct hns_roce_v2_qp_context *context,
4841 struct hns_roce_v2_qp_context *qpc_mask)
4842 {
4843 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4844 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4845 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4846 struct ib_device *ibdev = &hr_dev->ib_dev;
4847 const struct ib_gid_attr *gid_attr = NULL;
4848 u8 sl = rdma_ah_get_sl(&attr->ah_attr);
4849 int is_roce_protocol;
4850 u16 vlan_id = 0xffff;
4851 bool is_udp = false;
4852 u32 max_sl;
4853 u8 ib_port;
4854 u8 hr_port;
4855 int ret;
4856
4857 max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
4858 if (unlikely(sl > max_sl)) {
4859 ibdev_err_ratelimited(ibdev,
4860 "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
4861 sl, max_sl);
4862 return -EINVAL;
4863 }
4864
4865 /*
4866 * If free_mr_en of the qp is set, the qp comes from the free mr
4867 * operation and will perform loopback. In the loopback scenario,
4868 * only sl needs to be set.
4869 */
4870 if (hr_qp->free_mr_en) {
4871 hr_reg_write(context, QPC_SL, sl);
4872 hr_reg_clear(qpc_mask, QPC_SL);
4873 hr_qp->sl = sl;
4874 return 0;
4875 }
4876
4877 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4878 hr_port = ib_port - 1;
4879 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4880 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4881
4882 if (is_roce_protocol) {
4883 gid_attr = attr->ah_attr.grh.sgid_attr;
4884 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4885 if (ret)
4886 return ret;
4887
4888 is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
4889 }
4890
4891 /* Only HIP08 needs to set the vlan_en bits in QPC */
4892 if (vlan_id < VLAN_N_VID &&
4893 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4894 hr_reg_enable(context, QPC_RQ_VLAN_EN);
4895 hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
4896 hr_reg_enable(context, QPC_SQ_VLAN_EN);
4897 hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
4898 }
4899
4900 hr_reg_write(context, QPC_VLAN_ID, vlan_id);
4901 hr_reg_clear(qpc_mask, QPC_VLAN_ID);
4902
4903 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4904 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4905 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4906 return -EINVAL;
4907 }
4908
4909 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4910 ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
4911 return -EINVAL;
4912 }
4913
4914 hr_reg_write(context, QPC_UDPSPN,
4915 is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
4916 attr->dest_qp_num) :
4917 0);
4918
4919 hr_reg_clear(qpc_mask, QPC_UDPSPN);
4920
4921 hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
4922
4923 hr_reg_clear(qpc_mask, QPC_GMV_IDX);
4924
4925 hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
4926 hr_reg_clear(qpc_mask, QPC_HOPLIMIT);
4927
4928 ret = fill_cong_field(ibqp, attr, context, qpc_mask);
4929 if (ret)
4930 return ret;
4931
4932 hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
4933 hr_reg_clear(qpc_mask, QPC_TC);
4934
4935 hr_reg_write(context, QPC_FL, grh->flow_label);
4936 hr_reg_clear(qpc_mask, QPC_FL);
4937 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4938 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4939
4940 hr_qp->sl = sl;
4941 hr_reg_write(context, QPC_SL, hr_qp->sl);
4942 hr_reg_clear(qpc_mask, QPC_SL);
4943
4944 return 0;
4945 }
4946
4947 static bool check_qp_state(enum ib_qp_state cur_state,
4948 enum ib_qp_state new_state)
4949 {
4950 static const bool sm[][IB_QPS_ERR + 1] = {
4951 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4952 [IB_QPS_INIT] = true },
4953 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4954 [IB_QPS_INIT] = true,
4955 [IB_QPS_RTR] = true,
4956 [IB_QPS_ERR] = true },
4957 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4958 [IB_QPS_RTS] = true,
4959 [IB_QPS_ERR] = true },
4960 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4961 [IB_QPS_RTS] = true,
4962 [IB_QPS_ERR] = true },
4963 [IB_QPS_SQD] = {},
4964 [IB_QPS_SQE] = {},
4965 [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
4966 [IB_QPS_ERR] = true }
4967 };
4968
4969 return sm[cur_state][new_state];
4970 }
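/*
 * Illustrative note (added, not driver code): the table above encodes the
 * subset of IB QP transitions this engine accepts; any pair that is
 * absent is implicitly false and rejected. For example
 * sm[IB_QPS_RESET][IB_QPS_RTR] is false, so a RESET-to-RTR request fails
 * with -EINVAL in the caller, while sm[IB_QPS_ERR][IB_QPS_RESET] is true,
 * matching the usual recovery path of forcing an errored QP back to
 * RESET. The empty SQD and SQE rows mean those states are unsupported.
 */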
4971
4972 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4973 const struct ib_qp_attr *attr,
4974 int attr_mask,
4975 enum ib_qp_state cur_state,
4976 enum ib_qp_state new_state,
4977 struct hns_roce_v2_qp_context *context,
4978 struct hns_roce_v2_qp_context *qpc_mask,
4979 struct ib_udata *udata)
4980 {
4981 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4982 int ret = 0;
4983
4984 if (!check_qp_state(cur_state, new_state)) {
4985 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4986 return -EINVAL;
4987 }
4988
4989 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4990 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
4991 modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
4992 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4993 modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
4994 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4995 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4996 qpc_mask, udata);
4997 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4998 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4999 qpc_mask);
5000 }
5001
5002 return ret;
5003 }
5004
5005 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
5006 {
5007 #define QP_ACK_TIMEOUT_MAX_HIP08 20
5008 #define QP_ACK_TIMEOUT_MAX 31
5009
5010 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5011 if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
5012 ibdev_warn(&hr_dev->ib_dev,
5013 "local ACK timeout shall be 0 to 20.\n");
5014 return false;
5015 }
5016 *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
5017 } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
5018 if (*timeout > QP_ACK_TIMEOUT_MAX) {
5019 ibdev_warn(&hr_dev->ib_dev,
5020 "local ACK timeout shall be 0 to 31.\n");
5021 return false;
5022 }
5023 }
5024
5025 return true;
5026 }
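/*
 * Illustrative note (added, not driver code): per the IB specification,
 * a local ACK timeout value t encodes a wait of 4.096us * 2^t, so the
 * HIP08 cap of 20 corresponds to roughly 4.3 seconds while the generic
 * cap of 31 allows the full 5-bit encoding. The HIP08-only offset added
 * above presumably adapts the encoded value to that revision's timer
 * implementation; only the offset's use is visible here.
 */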
5027
5028 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
5029 const struct ib_qp_attr *attr,
5030 int attr_mask,
5031 struct hns_roce_v2_qp_context *context,
5032 struct hns_roce_v2_qp_context *qpc_mask)
5033 {
5034 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5035 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5036 int ret = 0;
5037 u8 timeout;
5038
5039 if (attr_mask & IB_QP_AV) {
5040 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
5041 qpc_mask);
5042 if (ret)
5043 return ret;
5044 }
5045
5046 if (attr_mask & IB_QP_TIMEOUT) {
5047 timeout = attr->timeout;
5048 if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
5049 hr_reg_write(context, QPC_AT, timeout);
5050 hr_reg_clear(qpc_mask, QPC_AT);
5051 }
5052 }
5053
5054 if (attr_mask & IB_QP_RETRY_CNT) {
5055 hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
5056 hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);
5057
5058 hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
5059 hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
5060 }
5061
5062 if (attr_mask & IB_QP_RNR_RETRY) {
5063 hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
5064 hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);
5065
5066 hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
5067 hr_reg_clear(qpc_mask, QPC_RNR_CNT);
5068 }
5069
5070 if (attr_mask & IB_QP_SQ_PSN) {
5071 hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
5072 hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);
5073
5074 hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
5075 hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);
5076
5077 hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
5078 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);
5079
5080 hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
5081 attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
5082 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);
5083
5084 hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
5085 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);
5086
5087 hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
5088 hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
5089 }
5090
5091 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
5092 attr->max_dest_rd_atomic) {
5093 hr_reg_write(context, QPC_RR_MAX,
5094 fls(attr->max_dest_rd_atomic - 1));
5095 hr_reg_clear(qpc_mask, QPC_RR_MAX);
5096 }
5097
5098 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
5099 hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
5100 hr_reg_clear(qpc_mask, QPC_SR_MAX);
5101 }
5102
5103 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
5104 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
5105
5106 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
5107 hr_reg_write(context, QPC_MIN_RNR_TIME,
5108 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
5109 HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
5110 hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
5111 }
5112
5113 if (attr_mask & IB_QP_RQ_PSN) {
5114 hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
5115 hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);
5116
5117 hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
5118 hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
5119 }
5120
5121 if (attr_mask & IB_QP_QKEY) {
5122 context->qkey_xrcd = cpu_to_le32(attr->qkey);
5123 qpc_mask->qkey_xrcd = 0;
5124 hr_qp->qkey = attr->qkey;
5125 }
5126
5127 return ret;
5128 }
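
/*
 * Editorial note: the *_rd_atomic depths are stored in the QPC as log2
 * values, rounded up to the next power of two by fls(n - 1). E.g.
 * max_rd_atomic = 8 gives fls(7) = 3, and a non-power-of-two request of 5
 * gives fls(4) = 3 as well, so hns_roce_v2_query_qp() reports 1 << 3 = 8
 * in both cases.
 */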

static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
					  const struct ib_qp_attr *attr,
					  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		hr_qp->atomic_rd_en = attr->qp_access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}
}

static void clear_qp(struct hns_roce_qp *hr_qp)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->send_cq)
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
				     hr_qp->qpn, NULL);

	if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
				     hr_qp->qpn, ibqp->srq ?
				     to_hr_srq(ibqp->srq) : NULL);

	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
		*hr_qp->rdb.db_record = 0;

	hr_qp->rq.head = 0;
	hr_qp->rq.tail = 0;
	hr_qp->sq.head = 0;
	hr_qp->sq.tail = 0;
	hr_qp->next_sge = 0;
}

static void v2_set_flushed_fields(struct ib_qp *ibqp,
				  struct hns_roce_v2_qp_context *context,
				  struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	unsigned long sq_flag = 0;
	unsigned long rq_flag = 0;

	if (ibqp->qp_type == IB_QPT_XRC_TGT)
		return;

	spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
	hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
	hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
	hr_qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);

	if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */
		return;

	spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
	hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
	hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
	spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
}
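
/*
 * Editorial note (inferred from the code, not stated in the source):
 * recording the current producer indices while moving to ERR tells the
 * hardware where the SQ/RQ end, presumably so it can generate flush
 * completions for every outstanding WQE. XRC_TGT QPs own no work queues
 * and SRQ/XRC_INI QPs own no RQ, hence the early returns above.
 */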

static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context ctx[2];
	struct hns_roce_v2_qp_context *context = ctx;
	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	/*
	 * In the v2 engine, software passes both a context and a context mask
	 * to hardware when modifying a QP. For every field software needs to
	 * modify, all bits of that field in the context mask must be cleared
	 * to 0 at the same time; all other mask bits stay 1 so the hardware
	 * leaves those fields unchanged.
	 */
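	/*
	 * Example of the pairing: changing only the ACK timeout would set
	 * context.AT to the new value and clear the AT bits in qpc_mask,
	 * leaving every other mask bit at 1 so the hardware keeps those
	 * fields untouched.
	 */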
	memset(context, 0, hr_dev->caps.qpc_sz);
	memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);

	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
					 new_state, context, qpc_mask, udata);
	if (ret)
		goto out;

	/* When QP state is err, SQ and RQ WQE should be flushed */
	if (new_state == IB_QPS_ERR)
		v2_set_flushed_fields(ibqp, context, qpc_mask);

	/* Configure the optional fields */
	ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
					 qpc_mask);
	if (ret)
		goto out;

	hr_reg_write_bool(context, QPC_INV_CREDIT,
			  to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
			  ibqp->srq);
	hr_reg_clear(qpc_mask, QPC_INV_CREDIT);

	/* Every state transition must also update the QP state field */
	hr_reg_write(context, QPC_QP_ST, new_state);
	hr_reg_clear(qpc_mask, QPC_QP_ST);

	/* SW passes the context to HW */
	ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
		goto out;
	}

	hr_qp->state = new_state;

	hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);

	if (new_state == IB_QPS_RESET && !ibqp->uobject)
		clear_qp(hr_qp);

out:
	return ret;
}

static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
	static const enum ib_qp_state map[] = {
		[HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
		[HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
		[HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
		[HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
		[HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
		[HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
		[HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
		[HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
	};

	return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}

static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
				 void *buffer)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
				qpn);
	if (ret)
		goto out;

	memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}
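
/*
 * Editorial note: this is the common mailbox query pattern used throughout
 * this file: allocate a DMA-able mailbox, issue a QUERY_* command with the
 * mailbox as the output buffer, copy the context out, then free the mailbox.
 */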

static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
			      struct hns_roce_v2_qp_context *context)
{
	u8 timeout;

	timeout = (u8)hr_reg_read(context, QPC_AT);
	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;

	return timeout;
}

static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context context = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int tmp_qp_state;
	int state;
	int ret;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		ret = 0;
		goto done;
	}

	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
	if (ret) {
		ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
		ret = -EINVAL;
		goto out;
	}

	state = hr_reg_read(&context, QPC_QP_ST);
	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
	if (tmp_qp_state == -1) {
		ibdev_err(ibdev, "Illegal ib_qp_state\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);

	qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
	qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
	qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
	qp_attr->qp_access_flags =
		((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
		((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
		((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
	    hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       hr_reg_read(&context, QPC_SL));
		rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1);
		rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
		grh->flow_label = hr_reg_read(&context, QPC_FL);
		grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
		grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
		grh->traffic_class = hr_reg_read(&context, QPC_TC);

		memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
	qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);

	qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
	qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
	qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
	qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
	qp_attr->cap.max_inline_data = hr_qp->max_inline_data;

	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;

	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->srq = ibqp->srq;
	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;

out:
	mutex_unlock(&hr_qp->mutex);
	return ret;
}

static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
{
	return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
		 hr_qp->ibqp.qp_type == IB_QPT_UD ||
		 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
		hr_qp->state != IB_QPS_RESET);
}

static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
					 struct hns_roce_qp *hr_qp,
					 struct ib_udata *udata)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cq *send_cq, *recv_cq;
	unsigned long flags;
	int ret = 0;

	if (modify_qp_is_ok(hr_qp)) {
		/* Modify the QP to the Reset state before destroying it */
		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
					    hr_qp->state, IB_QPS_RESET, udata);
		if (ret)
			ibdev_err(ibdev,
				  "failed to modify QP to RST, ret = %d.\n",
				  ret);
	}

	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(send_cq, recv_cq);

	if (!udata) {
		if (recv_cq)
			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
					       (hr_qp->ibqp.srq ?
						to_hr_srq(hr_qp->ibqp.srq) :
						NULL));

		if (send_cq && send_cq != recv_cq)
			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
	}

	hns_roce_qp_remove(hr_dev, hr_qp);

	hns_roce_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);

	return ret;
}

int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret;

	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
			  hr_qp->qpn, ret);

	hns_roce_qp_destroy(hr_dev, hr_qp, udata);

	return 0;
}

static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
					    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_sccc_clr_done *resp;
	struct hns_roce_sccc_clr *clr;
	struct hns_roce_cmq_desc desc;
	int ret, i;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	mutex_lock(&hr_dev->qp_table.scc_mutex);

	/* reset the SCC ctx clear-done flag */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
		goto out;
	}

	/* clear the SCC context of this QP */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
	clr = (struct hns_roce_sccc_clr *)desc.data;
	clr->qpn = cpu_to_le32(hr_qp->qpn);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
		goto out;
	}

	/* poll until the SCC context clear is done */
	resp = (struct hns_roce_sccc_clr_done *)desc.data;
	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
		hns_roce_cmq_setup_basic_desc(&desc,
					      HNS_ROCE_OPC_QUERY_SCCC, true);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret) {
			ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
				  ret);
			goto out;
		}

		if (resp->clr_done)
			goto out;

		msleep(20);
	}

	ibdev_err(ibdev, "timed out while querying the SCC clear-done flag.\n");
	ret = -ETIMEDOUT;

out:
	mutex_unlock(&hr_dev->qp_table.scc_mutex);
	return ret;
}
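
/*
 * Editorial note: the poll above is bounded, with at most
 * HNS_ROCE_CMQ_SCC_CLR_DONE_CNT + 1 query attempts and a 20 ms sleep
 * between them, so (assuming the count is defined elsewhere as N) the
 * worst-case wait is roughly 20 * (N + 1) ms before giving up with
 * -ETIMEDOUT.
 */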

#define DMA_IDX_SHIFT 3
#define DMA_WQE_SHIFT 3

static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
					      struct hns_roce_srq_context *ctx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = srq->ibsrq.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	u64 mtts_idx[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle_idx = 0;
	int ret;

	/* Get physical address of idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));

	hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
	hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
		     upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));

	hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));

	hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts_idx[0]));
	hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));

	hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts_idx[1]));
	hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));

	return 0;
}
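
/*
 * Editorial note, worked example of the base-address split above:
 * DMA_IDX_SHIFT drops the low 3 bits, and the result is split across the
 * L/H context fields (hr_reg_write() truncates to the field width). For
 * dma_handle_idx = 0x1234568000, the shifted value is 0x2468AD000, so
 * SRQC_IDX_BT_BA_L receives 0x468AD000 and SRQC_IDX_BT_BA_H receives 0x2.
 */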

static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
{
	struct ib_device *ibdev = srq->ibsrq.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	struct hns_roce_srq_context *ctx = mb_buf;
	u64 mtts_wqe[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle_wqe = 0;
	int ret;

	memset(ctx, 0, sizeof(*ctx));

	/* Get the physical address of srq buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	hr_reg_write(ctx, SRQC_SRQ_ST, 1);
	hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
			  srq->ibsrq.srq_type == IB_SRQT_XRC);
	hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
	hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
	hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
	hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
	hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
	hr_reg_write(ctx, SRQC_RQWS,
		     srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));

	hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
				      srq->wqe_cnt));

	hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
	hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
		     upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));

	hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));

	return hns_roce_v2_write_srqc_index_queue(srq, ctx);
}

static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
				  struct ib_srq_attr *srq_attr,
				  enum ib_srq_attr_mask srq_attr_mask,
				  struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_srq_context *srqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Resizing SRQs is not supported yet */
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		return -EOPNOTSUPP;

	if (srq_attr_mask & IB_SRQ_LIMIT) {
		if (srq_attr->srq_limit > srq->wqe_cnt)
			return -EINVAL;

		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		srq_context = mailbox->buf;
		srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;

		memset(srqc_mask, 0xff, sizeof(*srqc_mask));

		hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
		hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);

		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
					HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn);
		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to handle cmd of modifying SRQ, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	return 0;
}
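
/*
 * Editorial note: like the QPC path, SRQC modification uses a context
 * followed by a mask in the same mailbox buffer. The mask starts out
 * all-ones and only the LIMIT_WL bits are cleared, so the hardware
 * updates nothing else.
 */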

static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
				HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd of querying SRQ, ret = %d.\n",
			  ret);
		goto out;
	}

	attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
	attr->max_wr = srq->wqe_cnt;
	attr->max_sge = srq->max_gs - srq->rsv_sge;

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
	struct hns_roce_v2_cq_context *cq_context;
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	struct hns_roce_v2_cq_context *cqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;

	memset(cqc_mask, 0xff, sizeof(*cqc_mask));

	hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
	hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev,
				 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
				 cq_period);
			cq_period = HNS_ROCE_MAX_CQ_PERIOD;
		}
		cq_period *= HNS_ROCE_CLOCK_ADJUST;
	}
	hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
	hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
				HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when modifying CQ, ret = %d.\n",
			  ret);

	return ret;
}
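
/*
 * Editorial note: on HIP08 the period is scaled by HNS_ROCE_CLOCK_ADJUST
 * (defined elsewhere) into the timer units the hardware expects; the
 * USHRT_MAX guard suggests the scaled value must fit a 16-bit period
 * field, which, consistent with the "adjusted to 65" message above, makes
 * HNS_ROCE_MAX_CQ_PERIOD the largest configurable period on that revision.
 */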

static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
				 void *buffer)
{
	struct hns_roce_v2_cq_context *context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
				HNS_ROCE_CMD_QUERY_CQC, cqn);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when querying CQ, ret = %d.\n",
			  ret);
		goto err_mailbox;
	}

	memcpy(buffer, context, sizeof(*context));

err_mailbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
				 void *buffer)
{
	struct hns_roce_v2_mpt_entry *context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
				key_to_hw_index(key));
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when querying MPT, ret = %d.\n",
			  ret);
		goto err_mailbox;
	}

	memcpy(buffer, context, sizeof(*context));

err_mailbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

static void hns_roce_irq_work_handle(struct work_struct *work)
{
	struct hns_roce_work *irq_work =
		container_of(work, struct hns_roce_work, work);
	struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;

	switch (irq_work->event_type) {
	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		ibdev_info(ibdev, "path migration succeeded.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		ibdev_warn(ibdev, "path migration failed.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		ibdev_dbg(ibdev, "send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		ibdev_err(ibdev, "local work queue 0x%x catastrophic error, sub_event type is: %d\n",
			  irq_work->queue_num, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
			  irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
			  irq_work->queue_num, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		ibdev_dbg(ibdev, "SRQ limit reached.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		ibdev_dbg(ibdev, "SRQ last wqe reached.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		ibdev_err(ibdev, "SRQ catastrophic error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		ibdev_err(ibdev, "CQ 0x%x access error.\n", irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		ibdev_warn(ibdev, "DB overflow.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_FLR:
		ibdev_warn(ibdev, "function level reset.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		ibdev_err(ibdev, "xrc domain violation error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
		ibdev_err(ibdev, "invalid xrceth error.\n");
		break;
	default:
		break;
	}

	kfree(irq_work);
}

static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
				      struct hns_roce_eq *eq, u32 queue_num)
{
	struct hns_roce_work *irq_work;

	irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
	if (!irq_work)
		return;

	INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
	irq_work->hr_dev = hr_dev;
	irq_work->event_type = eq->event_type;
	irq_work->sub_type = eq->sub_type;
	irq_work->queue_num = queue_num;
	queue_work(hr_dev->irq_workq, &irq_work->work);
}
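
/*
 * Editorial note: GFP_ATOMIC is used because this runs from the AEQ
 * interrupt path. Allocation failure is tolerated by silently dropping
 * the logging work, and the work handler frees the item itself after
 * running.
 */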

static void update_eq_db(struct hns_roce_eq *eq)
{
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	struct hns_roce_v2_db eq_db = {};

	if (eq->type_flag == HNS_ROCE_AEQ) {
		hr_reg_write(&eq_db, EQ_DB_CMD,
			     eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			     HNS_ROCE_EQ_DB_CMD_AEQ :
			     HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
	} else {
		hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);

		hr_reg_write(&eq_db, EQ_DB_CMD,
			     eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			     HNS_ROCE_EQ_DB_CMD_CEQ :
			     HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
	}

	hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);

	hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
}

static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (hr_reg_read(aeqe, AEQE_OWNER) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
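
/*
 * Editorial note on the owner-bit scheme: eq->entries is a power of two,
 * so (cons_index & entries) flips each time the consumer wraps the ring,
 * and the hardware is expected to write the owner bit with alternating
 * polarity on each pass. The XOR above is therefore non-zero only for
 * entries produced in the current pass; e.g. with entries = 256, pass 0
 * consumes entries with OWNER = 1 and pass 1 consumes entries with
 * OWNER = 0. next_ceqe_sw_v2() below uses the identical scheme for CEQs.
 */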

static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
				       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
	irqreturn_t aeqe_found = IRQ_NONE;
	int event_type;
	u32 queue_num;
	int sub_type;

	while (aeqe) {
		/* Make sure we read AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE);
		sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE);
		queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			hns_roce_qp_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = IRQ_HANDLED;

		hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);

		aeqe = next_aeqe_sw_v2(eq);
	}

	update_eq_db(eq);

	return IRQ_RETVAL(aeqe_found);
}

static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (hr_reg_read(ceqe, CEQE_OWNER) ^
		!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
				       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
	irqreturn_t ceqe_found = IRQ_NONE;
	u32 cqn;

	while (ceqe) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = hr_reg_read(ceqe, CEQE_CQN);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqe_found = IRQ_HANDLED;

		ceqe = next_ceqe_sw_v2(eq);
	}

	update_eq_db(eq);

	return IRQ_RETVAL(ceqe_found);
}

static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	irqreturn_t int_work;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* Completion event interrupt */
		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
	else
		/* Asynchronous event interrupt */
		int_work = hns_roce_v2_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
					    u32 int_st)
{
	struct pci_dev *pdev = hr_dev->pci_dev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	const struct hnae3_ae_ops *ops = ae_dev->ops;
	irqreturn_t int_work = IRQ_NONE;
	u32 int_en;

	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		dev_err(hr_dev->dev, "AEQ overflow!\n");

		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
			   1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);

		/* Set reset level for reset_event() */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = IRQ_HANDLED;
	} else {
		dev_err(hr_dev->dev, "no basic abnormal irq was found.\n");
	}

	return IRQ_RETVAL(int_work);
}

static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
			      struct fmea_ram_ecc *ecc_info)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
	ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
	ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);

	return 0;
}

static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	u32 addr_upper;
	u32 addr_low;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
	hr_reg_write(req, CFG_GMV_BT_IDX, idx);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev,
			"failed to execute cmd to read gmv, ret = %d.\n", ret);
		return ret;
	}

	addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
	addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
	hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
	hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
	hr_reg_write(req, CFG_GMV_BT_IDX, idx);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
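
/*
 * Editorial note: recovery for the GMV base-address table is
 * scrub-by-rewrite. The current BT base address is read out of the
 * hardware and then written back unchanged, which should rewrite (and
 * thereby correct) the RAM line that raised the 1-bit ECC error.
 */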

static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
{
	if (res_type == ECC_RESOURCE_QPC_TIMER ||
	    res_type == ECC_RESOURCE_CQC_TIMER ||
	    res_type == ECC_RESOURCE_SCCC)
		return le64_to_cpu(*data);

	return le64_to_cpu(*data) << PAGE_SHIFT;
}
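
/*
 * Editorial note: the QPC_TIMER/CQC_TIMER/SCCC mailboxes report a byte
 * address directly, while the other resources report a page index that
 * must be shifted; e.g. a returned value of 0x2a becomes 0x2a000 assuming
 * 4 KiB pages (PAGE_SHIFT = 12).
 */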

static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
			       u32 index)
{
	u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
	u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 addr;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
	if (ret) {
		dev_err(hr_dev->dev,
			"failed to execute cmd to read fmea ram, ret = %d.\n",
			ret);
		goto out;
	}

	addr = fmea_get_ram_res_addr(res_type, mailbox->buf);

	ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
	if (ret)
		dev_err(hr_dev->dev,
			"failed to execute cmd to write fmea ram, ret = %d.\n",
			ret);

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
				 struct fmea_ram_ecc *ecc_info)
{
	u32 res_type = ecc_info->res_type;
	u32 index = ecc_info->index;
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);

	if (res_type >= ECC_RESOURCE_COUNT) {
		dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
			res_type);
		return;
	}

	if (res_type == ECC_RESOURCE_GMV)
		ret = fmea_recover_gmv(hr_dev, index);
	else
		ret = fmea_recover_others(hr_dev, res_type, index);
	if (ret)
		dev_err(hr_dev->dev,
			"failed to recover %s, index = %u, ret = %d.\n",
			fmea_ram_res[res_type].name, index, ret);
}

static void fmea_ram_ecc_work(struct work_struct *ecc_work)
{
	struct hns_roce_dev *hr_dev =
		container_of(ecc_work, struct hns_roce_dev, ecc_work);
	struct fmea_ram_ecc ecc_info = {};

	if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
		dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
		return;
	}

	if (!ecc_info.is_ecc_err) {
		dev_err(hr_dev->dev, "no fmea ram ecc error was found.\n");
		return;
	}

	fmea_ram_ecc_recover(hr_dev, &ecc_info);
}

static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	irqreturn_t int_work = IRQ_NONE;
	u32 int_st;

	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);

	if (int_st) {
		int_work = abnormal_interrupt_basic(hr_dev, int_st);
	} else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
		int_work = IRQ_HANDLED;
	} else {
		dev_err(hr_dev->dev, "no abnormal irq was found.\n");
	}

	return IRQ_RETVAL(int_work);
}

static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
					int eq_num, u32 enable_flag)
{
	int i;

	for (i = 0; i < eq_num; i++)
		roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
			   i * EQ_REG_OFFSET, enable_flag);

	roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
	roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}

static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}

static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev,
				    struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	int eqn = eq->eqn;
	int ret;
	u8 cmd;

	if (eqn < hr_dev->caps.num_comp_vectors)
		cmd = HNS_ROCE_CMD_DESTROY_CEQC;
	else
		cmd = HNS_ROCE_CMD_DESTROY_AEQC;

	ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
	if (ret)
		dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);

	free_eq_buf(hr_dev, eq);
}

static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->shift = ilog2((unsigned int)eq->entries);
}

static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
		      void *mb_buf)
{
	u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
	struct hns_roce_eq_context *eqc;
	u64 bt_ba = 0;
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	init_eq_config(hr_dev, eq);

	/* if not multi-hop, the eqe buffer uses only one trunk */
	count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
				  &bt_ba);
	if (count < 1) {
		dev_err(hr_dev->dev, "failed to find EQE mtr\n");
		return -ENOBUFS;
	}

	hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
	hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
	hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
	hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
	hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
	hr_reg_write(eqc, EQC_EQN, eq->eqn);
	hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
	hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
				 eq->eq_period);
			eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
		}
		eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
	}

	hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
	hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
	hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
	hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
	hr_reg_write(eqc, EQC_SHIFT, eq->shift);
	hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
	hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
	hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);

	return 0;
}
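
/*
 * Editorial note: the EQE base addresses are scattered across several
 * narrow context fields, and the shifts above determine which address
 * bits land where (the field widths come from the EQC register layout,
 * defined elsewhere). E.g. with bt_ba = 0x800000000, EQC_EQE_BA_L
 * receives bt_ba >> 3 = 0x100000000 truncated to its field width and
 * EQC_EQE_BA_H receives bt_ba >> 35 = 0x1.
 */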

static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
		eq->hop_num = 0;
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
				  0);
	if (err)
		dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);

	return err;
}

static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq, u8 eq_cmd)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = alloc_eq_buf(hr_dev, eq);
	if (ret)
		goto free_cmd_mbox;

	ret = config_eqc(hr_dev, eq, mailbox->buf);
	if (ret)
		goto err_cmd_mbox;

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn);
	if (ret) {
		dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	free_eq_buf(hr_dev, eq);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq names are laid out as: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);

		else if (j < (other_num + comp_num))
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
		else
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
		if (ret) {
			dev_err(hr_dev->dev, "failed to request irq.\n");
			goto err_request_failed;
		}
	}

	return 0;

err_request_failed:
	for (j -= 1; j >= 0; j--)
		if (j < other_num)
			free_irq(hr_dev->irq[j], hr_dev);
		else
			free_irq(eq_table->eq[j - other_num].irq,
				 &eq_table->eq[j - other_num]);

err_kzalloc_failed:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);

	return ret;
}
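
/*
 * Editorial note: the name array is laid out abn/aeq/ceq while
 * eq_table->eq[] holds CEQs first, which explains the index juggling
 * above. Worked example with other_num = 1, aeq_num = 1, comp_num = 2:
 * irq_names[] is "hns-abn-0", "hns-aeq-0", "hns-ceq-0", "hns-ceq-1";
 * for j = 1 (the first CEQ) the name index is j + aeq_num = 2
 * ("hns-ceq-0"), and for j = 3 (the AEQ) it is j - comp_num = 1
 * ("hns-aeq-0"), each paired with eq[j - other_num].
 */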

static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
{
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	for (i = 0; i < eq_num; i++)
		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);

	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);
}

static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	int other_num;
	int comp_num;
	int aeq_num;
	int irq_num;
	int eq_num;
	u8 eq_cmd;
	int ret;
	int i;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* create eq */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		if (i < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = hr_dev->caps.ceqe_size;
			eq->irq = hr_dev->irq[i + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = hr_dev->caps.aeqe_size;
			eq->irq = hr_dev->irq[i - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "failed to create eq.\n");
			goto err_create_eq_fail;
		}
	}

	INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);

	hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
	if (!hr_dev->irq_workq) {
		dev_err(dev, "failed to create irq workqueue.\n");
		ret = -ENOMEM;
		goto err_create_eq_fail;
	}

	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
				     other_num);
	if (ret) {
		dev_err(dev, "failed to request irq.\n");
		goto err_request_irq_fail;
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	destroy_workqueue(hr_dev->irq_workq);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);
	kfree(eq_table->eq);

	return ret;
}

static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	__hns_roce_free_irq(hr_dev);
	destroy_workqueue(hr_dev->irq_workq);

	for (i = 0; i < eq_num; i++)
		hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eq);
}

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = v2_post_mbox,
	.poll_mbox_done = v2_poll_mbox_done,
	.chk_mbox_avail = v2_chk_mbox_is_avail,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.dereg_mr = hns_roce_v2_dereg_mr,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.query_cqc = hns_roce_v2_query_cqc,
	.query_qpc = hns_roce_v2_query_qpc,
	.query_mpt = hns_roce_v2_query_mpt,
	.query_hw_counter = hns_roce_hw_v2_query_counter,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				   struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	const struct pci_device_id *id;
	int i;

	hr_dev->pci_dev = handle->pdev;
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	hr_dev->is_vf = id->driver_data;
	hr_dev->dev = &handle->pdev->dev;
	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->mem_base = handle->rinfo.roce_mem_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < handle->rinfo.num_vectors; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;
}

static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hns_roce_hw_v2_get_cfg(hr_dev, handle);

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_roce_init;
	}

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		ret = free_mr_init(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev, "failed to init free mr!\n");
			goto error_failed_free_mr_init;
		}
	}

	handle->priv = hr_dev;

	return 0;

error_failed_free_mr_init:
	hns_roce_exit(hr_dev);

error_failed_roce_init:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}
6766
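/*
 * Tear down in reverse order of __hns_roce_hw_v2_init_instance():
 * detach the device from the handle, flush outstanding device error
 * state, undo the HIP08-only free-mr setup, then release the RoCE
 * engine, the private data and the IB device.
 */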
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
	hns_roce_handle_device_err(hr_dev);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		free_mr_exit(hr_dev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}

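/*
 * hnae3 client entry point. Refuse to initialize while a reset is in
 * flight, skip VF devices on HIP08 (VFs appear to be unsupported on
 * that revision), and track progress in rinfo.instance_state so the
 * reset path can tell whether a full instance exists.
 */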
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;

	if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
		return 0;

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy resetting, please retry later.\n");

	return -EBUSY;
}

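/*
 * hnae3 client teardown. Only uninitialize once a full init has
 * completed; the instance_state transitions guard against a double
 * uninit.
 */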
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

	__hns_roce_hw_v2_uninit_instance(handle, reset);

	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

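/*
 * HNAE3_DOWN_CLIENT stage of a reset: mark the device inactive and
 * disable doorbells. If no instance was ever fully initialized, flag
 * the reset as a direct return so the later stages become no-ops.
 */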
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->active = false;
	hr_dev->dis_db = true;
	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

	return 0;
}

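/*
 * HNAE3_INIT_CLIENT stage of a reset: reinitialize the RoCE instance
 * from scratch once the hardware is back up.
 */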
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* The RoCE engine is reinitialized in the HNAE3_INIT_CLIENT
		 * reset notify callback. If the reinit fails, clear
		 * handle->priv and return the error so the NIC driver is
		 * informed.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "reset done, RoCE client reinit finished.\n");
	}

	return ret;
}

6895
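/*
 * HNAE3_UNINIT_CLIENT stage of a reset: tear the instance down. The
 * delay before uninit presumably gives the hardware time to quiesce
 * outstanding work before resources are reclaimed.
 */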
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}

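/*
 * Dispatch hnae3 reset notifications to the per-stage handlers above.
 * Unknown notify types are silently ignored.
 */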
static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_DOWN_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_down(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_init(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
		break;
	default:
		break;
	}

	return ret;
}

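/*
 * The RoCE driver does not own the PCI device itself; it registers as
 * an hnae3 client, and the hns3 NIC driver invokes these callbacks to
 * create, destroy and reset RoCE instances on its behalf.
 */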
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");