/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

enum {
        CMD_RST_PRC_OTHERS,
        CMD_RST_PRC_SUCCESS,
        CMD_RST_PRC_EBUSY,
};

enum ecc_resource_type {
        ECC_RESOURCE_QPC,
        ECC_RESOURCE_CQC,
        ECC_RESOURCE_MPT,
        ECC_RESOURCE_SRQC,
        ECC_RESOURCE_GMV,
        ECC_RESOURCE_QPC_TIMER,
        ECC_RESOURCE_CQC_TIMER,
        ECC_RESOURCE_SCCC,
        ECC_RESOURCE_COUNT,
};

static const struct {
        const char *name;
        u8 read_bt0_op;
        u8 write_bt0_op;
} fmea_ram_res[] = {
        { "ECC_RESOURCE_QPC",
          HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
        { "ECC_RESOURCE_CQC",
          HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
        { "ECC_RESOURCE_MPT",
          HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
        { "ECC_RESOURCE_SRQC",
          HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
        /* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
        { "ECC_RESOURCE_GMV",
          0, 0 },
        { "ECC_RESOURCE_QPC_TIMER",
          HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
        { "ECC_RESOURCE_CQC_TIMER",
          HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
        { "ECC_RESOURCE_SCCC",
          HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
};

static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                                   struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The real values of the hns wr opcodes start from 0. In order to
 * distinguish initialized from uninitialized map entries, we add 1 to the
 * real value when defining the mapping, so that a mapped value greater
 * than 0 identifies a valid entry.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
        [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
        HR_OPC_MAP(RDMA_WRITE,                  RDMA_WRITE),
        HR_OPC_MAP(RDMA_WRITE_WITH_IMM,         RDMA_WRITE_WITH_IMM),
        HR_OPC_MAP(SEND,                        SEND),
        HR_OPC_MAP(SEND_WITH_IMM,               SEND_WITH_IMM),
        HR_OPC_MAP(RDMA_READ,                   RDMA_READ),
        HR_OPC_MAP(ATOMIC_CMP_AND_SWP,          ATOM_CMP_AND_SWAP),
        HR_OPC_MAP(ATOMIC_FETCH_AND_ADD,        ATOM_FETCH_AND_ADD),
        HR_OPC_MAP(SEND_WITH_INV,               SEND_WITH_INV),
        HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP,   ATOM_MSK_CMP_AND_SWAP),
        HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
        HR_OPC_MAP(REG_MR,                      FAST_REG_PMR),
};

static u32 to_hr_opcode(u32 ib_opcode)
{
        if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
                return HNS_ROCE_V2_WQE_OP_MASK;

        return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
                                             HNS_ROCE_V2_WQE_OP_MASK;
}
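
/*
 * For example, the table above stores 1 + HNS_ROCE_V2_WQE_OP_RDMA_WRITE at
 * index IB_WR_RDMA_WRITE, so to_hr_opcode(IB_WR_RDMA_WRITE) returns
 * HNS_ROCE_V2_WQE_OP_RDMA_WRITE, while an IB opcode with no table entry
 * (value 0) or beyond the table yields HNS_ROCE_V2_WQE_OP_MASK.
 */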

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_reg_wr *wr)
{
        struct hns_roce_wqe_frmr_seg *fseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_mr *mr = to_hr_mr(wr->mr);
        u64 pbl_ba;

        /* use ib_access_flags */
        hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
        hr_reg_write_bool(fseg, FRMR_ATOMIC,
                          wr->access & IB_ACCESS_REMOTE_ATOMIC);
        hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
        hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
        hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);

        /* Data structure reuse may lead to confusion */
        pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
        rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
        rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

        rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
        rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
        rc_sq_wqe->rkey = cpu_to_le32(wr->key);
        rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

        hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
        hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
                     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
        hr_reg_clear(fseg, FRMR_BLK_MODE);
}

static void set_atomic_seg(const struct ib_send_wr *wr,
                           struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                           unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_wqe_atomic_seg *aseg =
                (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

        set_data_seg_v2(dseg, wr->sg_list);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
                aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
        } else {
                aseg->fetchadd_swap_data =
                        cpu_to_le64(atomic_wr(wr)->compare_add);
                aseg->cmp_data = 0;
        }

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
                                 const struct ib_send_wr *wr,
                                 unsigned int *sge_idx, u32 msg_len)
{
        struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
        unsigned int left_len_in_pg;
        unsigned int idx = *sge_idx;
        unsigned int i = 0;
        unsigned int len;
        void *addr;
        void *dseg;

        if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
                ibdev_err(ibdev,
                          "not enough extended sge space for inline data.\n");
                return -EINVAL;
        }

        dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
        left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
        len = wr->sg_list[0].length;
        addr = (void *)(unsigned long)(wr->sg_list[0].addr);

        /* When copying data to the extended sge space, the length left in
         * the page may not be long enough for the current user's sge, so
         * the data is split into several parts: one in the first page, and
         * the others in the subsequent pages.
         */
        while (1) {
                if (len <= left_len_in_pg) {
                        memcpy(dseg, addr, len);

                        idx += len / HNS_ROCE_SGE_SIZE;

                        i++;
                        if (i >= wr->num_sge)
                                break;

                        /* Advance by the bytes just copied before switching
                         * to the next sge, so the inline data stays
                         * contiguous in the extended sge space.
                         */
                        dseg += len;
                        left_len_in_pg -= len;
                        len = wr->sg_list[i].length;
                        addr = (void *)(unsigned long)(wr->sg_list[i].addr);
                } else {
                        memcpy(dseg, addr, left_len_in_pg);

                        len -= left_len_in_pg;
                        addr += left_len_in_pg;
                        idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
                        dseg = hns_roce_get_extend_sge(qp,
                                                idx & (qp->sge.sge_cnt - 1));
                        left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
                }
        }

        *sge_idx = idx;

        return 0;
}
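
/*
 * A sketch of the split behaviour above, assuming the usual 4 KiB hardware
 * page and 16-byte sge slots: if dseg starts 64 bytes before a page
 * boundary and the first sge holds 100 bytes, the first 64 bytes are
 * copied into the current page, dseg then moves to the start of the next
 * extended-sge page for the remaining 36 bytes, and idx advances by the
 * number of whole HNS_ROCE_SGE_SIZE slots consumed by each copy.
 */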

static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
                           unsigned int *sge_ind, unsigned int cnt)
{
        struct hns_roce_v2_wqe_data_seg *dseg;
        unsigned int idx = *sge_ind;

        while (cnt > 0) {
                dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
                if (likely(sge->length)) {
                        set_data_seg_v2(dseg, sge);
                        idx++;
                        cnt--;
                }
                sge++;
        }

        *sge_ind = idx;
}

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        int mtu = ib_mtu_enum_to_int(qp->path_mtu);

        if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
                          len, qp->max_inline_data, mtu);
                return false;
        }

        return true;
}

static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                      unsigned int *sge_idx)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        unsigned int curr_idx = *sge_idx;
        void *dseg = rc_sq_wqe;
        unsigned int i;
        int ret;

        if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
                ibdev_err(ibdev, "invalid inline parameters!\n");
                return -EINVAL;
        }

        if (!check_inl_data_len(qp, msg_len))
                return -EINVAL;

        dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

        if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
                hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);

                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(dseg, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
                        dseg += wr->sg_list[i].length;
                }
        } else {
                hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);

                ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
                if (ret)
                        return ret;

                hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
        }

        *sge_idx = curr_idx;

        return 0;
}

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                             struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                             unsigned int *sge_ind,
                             unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        int j = 0;
        int i;

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
                     (*sge_ind) & (qp->sge.sge_cnt - 1));

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
                     !!(wr->send_flags & IB_SEND_INLINE));
        if (wr->send_flags & IB_SEND_INLINE)
                return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

        if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
                for (i = 0; i < wr->num_sge; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                        }
                }
        } else {
                for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                                j++;
                        }
                }

                set_extend_sge(qp, wr->sg_list + i, sge_ind,
                               valid_num_sge - HNS_ROCE_SGE_IN_WQE);
        }

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);

        return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;

        if (unlikely(hr_qp->state == IB_QPS_RESET ||
                     hr_qp->state == IB_QPS_INIT ||
                     hr_qp->state == IB_QPS_RTR)) {
                ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
                          hr_qp->state);
                return -EINVAL;
        } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
                ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
                          hr_dev->state);
                return -EIO;
        }

        return 0;
}

static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
                                    unsigned int *sge_len)
{
        unsigned int valid_num = 0;
        unsigned int len = 0;
        int i;

        for (i = 0; i < wr->num_sge; i++) {
                if (likely(wr->sg_list[i].length)) {
                        len += wr->sg_list[i].length;
                        valid_num++;
                }
        }

        *sge_len = len;
        return valid_num;
}

static __le32 get_immtdata(const struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
        default:
                return 0;
        }
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;

        if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
                return -EINVAL;

        ud_sq_wqe->immtdata = get_immtdata(wr);

        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));

        return 0;
}

static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
                      struct hns_roce_ah *ah)
{
        struct ib_device *ib_dev = ah->ibah.device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);

        if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
                return -EINVAL;

        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);

        ud_sq_wqe->sgid_index = ah->av.gid_index;

        memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
        memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                return 0;

        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);

        return 0;
}
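
/*
 * Note the qkey handling in set_ud_wqe() below: when the high-order bit of
 * the WR's remote_qkey is set (0x80000000), the QP's own qkey is used
 * instead, which is the standard IB "well-known qkey" convention.
 */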

static inline int set_ud_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        unsigned int valid_num_sge;
        u32 msg_len = 0;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);

        ret = set_ud_opcode(ud_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
                     !!(wr->send_flags & IB_SEND_SIGNALED));
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
                     !!(wr->send_flags & IB_SEND_SOLICITED));

        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
                     curr_idx & (qp->sge.sge_cnt - 1));

        ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
                          qp->qkey : ud_wr(wr)->remote_qkey);
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);

        ret = fill_ud_av(ud_sq_wqe, ah);
        if (ret)
                return ret;

        qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;

        set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

        /*
         * The pipeline can sequentially post all valid WQEs into WQ buffer,
         * including new WQEs waiting for the doorbell to update the PI again.
         * Therefore, the owner bit of WQE MUST be updated after all fields
         * and extSGEs have been written into DDR instead of cache.
         */
        if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
                dma_wmb();

        *sge_idx = curr_idx;
        hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);

        return 0;
}

static int set_rc_opcode(struct hns_roce_dev *hr_dev,
                         struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;
        int ret = 0;

        rc_sq_wqe->immtdata = get_immtdata(wr);

        switch (ib_op) {
        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
                break;
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                break;
        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
                break;
        case IB_WR_REG_MR:
                if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                        set_frmr_seg(rc_sq_wqe, reg_wr(wr));
                else
                        ret = -EOPNOTSUPP;
                break;
        case IB_WR_SEND_WITH_INV:
                rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
                break;
        default:
                ret = -EINVAL;
        }

        if (unlikely(ret))
                return ret;

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));

        return ret;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        unsigned int valid_num_sge;
        u32 msg_len = 0;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);

        rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

        ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE,
                     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
                     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
                     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
            wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
                set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
        else if (wr->opcode != IB_WR_REG_MR)
                ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
                                        &curr_idx, valid_num_sge);

        /*
         * The pipeline can sequentially post all valid WQEs into WQ buffer,
         * including new WQEs waiting for the doorbell to update the PI again.
         * Therefore, the owner bit of WQE MUST be updated after all fields
         * and extSGEs have been written into DDR instead of cache.
         */
        if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
                dma_wmb();

        *sge_idx = curr_idx;
        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);

        return ret;
}
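
/*
 * The doorbell helpers below ring the SQ/RQ doorbell with the new producer
 * index; if the QP has already moved to the error state, they generate a
 * flush CQE instead so that outstanding work requests complete with a
 * flush error.
 */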

static inline void update_sq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
{
        if (unlikely(qp->state == IB_QPS_ERR)) {
                flush_cqe(hr_dev, qp);
        } else {
                struct hns_roce_v2_db sq_db = {};

                hr_reg_write(&sq_db, DB_TAG, qp->qpn);
                hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
                hr_reg_write(&sq_db, DB_PI, qp->sq.head);
                hr_reg_write(&sq_db, DB_SL, qp->sl);

                hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
        }
}

static inline void update_rq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
{
        if (unlikely(qp->state == IB_QPS_ERR)) {
                flush_cqe(hr_dev, qp);
        } else {
                if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
                        *qp->rdb.db_record =
                                qp->rq.head & V2_DB_PRODUCER_IDX_M;
                } else {
                        struct hns_roce_v2_db rq_db = {};

                        hr_reg_write(&rq_db, DB_TAG, qp->qpn);
                        hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
                        hr_reg_write(&rq_db, DB_PI, qp->rq.head);

                        hns_roce_write64(hr_dev, (__le32 *)&rq_db,
                                         qp->rq.db_reg);
                }
        }
}

static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
                              u64 __iomem *dest)
{
#define HNS_ROCE_WRITE_TIMES 8
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        int i;

        if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
                for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
                        writeq_relaxed(*(val + i), dest + i);
}

static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
                       void *wqe)
{
#define HNS_ROCE_SL_SHIFT 2
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

        /* All kinds of DirectWQE have the same header field layout */
        hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
                     qp->sl >> HNS_ROCE_SL_SHIFT);
        hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);

        hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}

static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        unsigned long flags = 0;
        unsigned int owner_bit;
        unsigned int sge_idx;
        unsigned int wqe_idx;
        void *wqe = NULL;
        u32 nreq;
        int ret;

        spin_lock_irqsave(&qp->sq.lock, flags);

        ret = check_send_valid(hr_dev, qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        sge_idx = qp->next_sge;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
                                  wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = hns_roce_get_send_wqe(qp, wqe_idx);
                qp->sq.wrid[wqe_idx] = wr->wr_id;
                owner_bit =
                        ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

                /* Build the WQE according to the QP type */
                if (ibqp->qp_type == IB_QPT_RC)
                        ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
                else
                        ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);

                if (unlikely(ret)) {
                        *bad_wr = wr;
                        goto out;
                }
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;
                qp->next_sge = sge_idx;

                if (nreq == 1 && !ret &&
                    (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
                        write_dwqe(hr_dev, qp, wqe);
                else
                        update_sq_db(hr_dev, qp);
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}
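
/*
 * On the owner bit above: (sq.head + nreq) >> ilog2(wqe_cnt) counts how
 * many times the SQ has wrapped, so the bit written into each WQE toggles
 * on every pass over the ring (1 on the first lap, 0 on the second, and so
 * on). This lets hardware tell freshly written WQEs from stale entries
 * left over from the previous lap.
 */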

static int check_recv_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
                return -EIO;

        if (hr_qp->state == IB_QPS_RESET)
                return -EINVAL;

        return 0;
}

static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
                                 u32 max_sge, bool rsv)
{
        struct hns_roce_v2_wqe_data_seg *dseg = wqe;
        u32 i, cnt;

        for (i = 0, cnt = 0; i < wr->num_sge; i++) {
                /* Skip zero-length sge */
                if (!wr->sg_list[i].length)
                        continue;
                set_data_seg_v2(dseg + cnt, wr->sg_list + i);
                cnt++;
        }

        /* Fill a reserved sge to make hw stop reading remaining segments */
        if (rsv) {
                dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                dseg[cnt].addr = 0;
                dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
        } else {
                /* Clear remaining segments to make ROCEE ignore sges */
                if (cnt < max_sge)
                        memset(dseg + cnt, 0,
                               (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
        }
}

static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
                        u32 wqe_idx, u32 max_sge)
{
        void *wqe = NULL;

        wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
        fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
}
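
/*
 * In fill_recv_sge_to_wqe() above, for example, with max_sge = 4 and a
 * reserved sge, a 2-sge receive WR fills dseg[0..1] and writes the
 * HNS_ROCE_INVALID_LKEY sentinel into dseg[2]; without the reserved sge,
 * dseg[2..3] are simply zeroed so the hardware ignores them.
 */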

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 wqe_idx, nreq, max_sge;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);

        ret = check_recv_valid(hr_dev, hr_qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
                                                  hr_qp->ibqp.recv_cq))) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > max_sge)) {
                        ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
                                  wr->num_sge, max_sge);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
                fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
                hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;

                update_rq_db(hr_dev, hr_qp);
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
        return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
        return hns_roce_buf_offset(idx_que->mtr.kmem,
                                   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
        srq->idx_que.tail++;

        spin_unlock(&srq->lock);
}

static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
        struct hns_roce_idx_que *idx_que = &srq->idx_que;

        return idx_que->head - idx_que->tail >= srq->wqe_cnt;
}

static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
                                const struct ib_recv_wr *wr)
{
        struct ib_device *ib_dev = srq->ibsrq.device;

        if (unlikely(wr->num_sge > max_sge)) {
                ibdev_err(ib_dev,
                          "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
                          wr->num_sge, max_sge);
                return -EINVAL;
        }

        if (unlikely(hns_roce_srqwq_overflow(srq))) {
                ibdev_err(ib_dev,
                          "failed to check srqwq status, srqwq is full.\n");
                return -ENOMEM;
        }

        return 0;
}

static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
{
        struct hns_roce_idx_que *idx_que = &srq->idx_que;
        u32 pos;

        pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
        if (unlikely(pos == srq->wqe_cnt))
                return -ENOSPC;

        bitmap_set(idx_que->bitmap, pos, 1);
        *wqe_idx = pos;
        return 0;
}

static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
{
        struct hns_roce_idx_que *idx_que = &srq->idx_que;
        unsigned int head;
        __le32 *buf;

        head = idx_que->head & (srq->wqe_cnt - 1);

        buf = get_idx_buf(idx_que, head);
        *buf = cpu_to_le32(wqe_idx);

        idx_que->head++;
}

static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
{
        hr_reg_write(db, DB_TAG, srq->srqn);
        hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
        hr_reg_write(db, DB_PI, srq->idx_que.head);
}
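
/*
 * SRQ WQEs are not consumed in ring order, so posting goes through the
 * index queue: allocate a free WQE slot from the bitmap, fill the WQE
 * buffer, publish the slot number through the index queue, and finally
 * ring the SRQ doorbell with the index-queue producer index.
 */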

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                                     const struct ib_recv_wr *wr,
                                     const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_v2_db srq_db;
        unsigned long flags;
        int ret = 0;
        u32 max_sge;
        u32 wqe_idx;
        void *wqe;
        u32 nreq;

        spin_lock_irqsave(&srq->lock, flags);

        max_sge = srq->max_gs - srq->rsv_sge;
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                ret = check_post_srq_valid(srq, max_sge, wr);
                if (ret) {
                        *bad_wr = wr;
                        break;
                }

                ret = get_srq_wqe_idx(srq, &wqe_idx);
                if (unlikely(ret)) {
                        *bad_wr = wr;
                        break;
                }

                wqe = get_srq_wqe_buf(srq, wqe_idx);
                fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
                fill_wqe_idx(srq, wqe_idx);
                srq->wrid[wqe_idx] = wr->wr_id;
        }

        if (likely(nreq)) {
                update_srq_db(&srq_db, srq);

                hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return ret;
}

static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
                                      unsigned long instance_stage,
                                      unsigned long reset_stage)
{
        /* When hardware reset has completed at least once, we should stop
         * sending mailbox, cmq and doorbell operations to hardware. If we
         * are in .init_instance(), we should exit with an error. If we are
         * at the HNAE3_INIT_CLIENT stage of the soft reset process, we
         * should also exit with an error so that the HNAE3_INIT_CLIENT path
         * can roll back operations such as notifying hardware to free
         * resources, and then notify the NIC driver to reschedule the soft
         * reset process.
         */
        hr_dev->is_reset = true;
        hr_dev->dis_db = true;

        if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
{
#define HW_RESET_TIMEOUT_US 1000000
#define HW_RESET_SLEEP_US 1000

        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long val;
        int ret;

        /* When a hardware reset is detected, we should stop sending
         * mailbox, cmq and doorbell operations to hardware. If we are in
         * .init_instance(), we should exit with an error. If we are at the
         * HNAE3_INIT_CLIENT stage of the soft reset process, we should also
         * exit with an error so that the HNAE3_INIT_CLIENT path can roll
         * back operations such as notifying hardware to free resources,
         * and then notify the NIC driver to reschedule the soft reset
         * process.
         */
        hr_dev->dis_db = true;

        ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
                                val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
                                HW_RESET_TIMEOUT_US, false, handle);
        if (!ret)
                hr_dev->is_reset = true;

        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a software reset is detected in .init_instance(), we should
         * stop sending mailbox, cmq and doorbell operations to hardware,
         * and exit with an error.
         */
        hr_dev->dis_db = true;
        if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
                hr_dev->is_reset = true;

        return CMD_RST_PRC_EBUSY;
}
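
/*
 * check_aedev_reset_status() below classifies the current reset state into
 * one of three results:
 * CMD_RST_PRC_OTHERS  - no reset in progress; process the command normally.
 * CMD_RST_PRC_SUCCESS - a reset has completed; the command is treated as
 *                       successful since the hardware state is gone anyway.
 * CMD_RST_PRC_EBUSY   - a reset is in flight; the caller fails with -EBUSY
 *                       and the upper layer may retry.
 */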

static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
                                    struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long instance_stage; /* the current instance stage */
        unsigned long reset_stage; /* the current reset stage */
        unsigned long reset_cnt;
        bool sw_resetting;
        bool hw_resetting;

        /* Get reset information from the NIC driver or from the RoCE driver
         * itself. The variables obtained from the NIC driver mean:
         * reset_cnt -- the count of completed hardware resets.
         * hw_resetting -- whether the hardware device is resetting now.
         * sw_resetting -- whether the NIC's software reset is running now.
         */
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
        if (reset_cnt != hr_dev->reset_cnt)
                return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
                                                  reset_stage);

        hw_resetting = ops->get_cmdq_stat(handle);
        if (hw_resetting)
                return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
                                                    reset_stage);

        sw_resetting = ops->ae_dev_resetting(handle);
        if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
                return hns_roce_v2_cmd_sw_resetting(hr_dev);

        return CMD_RST_PRC_OTHERS;
}

static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
                return true;

        if (ops->get_hw_reset_stat(handle))
                return true;

        if (ops->ae_dev_resetting(handle))
                return true;

        return false;
}

static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        u32 status;

        if (hr_dev->is_reset)
                status = CMD_RST_PRC_SUCCESS;
        else
                status = check_aedev_reset_status(hr_dev, priv->handle);

        *busy = (status == CMD_RST_PRC_EBUSY);

        return status == CMD_RST_PRC_OTHERS;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = dma_alloc_coherent(hr_dev->dev, size,
                                        &ring->desc_dma_addr, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_free_coherent(hr_dev->dev,
                          ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                          ring->desc, ring->desc_dma_addr);

        ring->desc_dma_addr = 0;
}

static int init_csq(struct hns_roce_dev *hr_dev,
                    struct hns_roce_v2_cmq_ring *csq)
{
        dma_addr_t dma;
        int ret;

        csq->desc_num = CMD_CSQ_DESC_NUM;
        spin_lock_init(&csq->lock);
        csq->flag = TYPE_CSQ;
        csq->head = 0;

        ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
        if (ret)
                return ret;

        dma = csq->desc_dma_addr;
        roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
        roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
        roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                   (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);

        /* Make sure to write CI first and then PI */
        roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
        roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);

        return 0;
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int ret;

        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        ret = init_csq(hr_dev, &priv->cmq.csq);
        if (ret)
                dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
}
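
/*
 * Command queue (CSQ) flow: the driver copies descriptors into the ring,
 * bumps the producer index (PI) register, then polls the consumer index
 * (CI) register until hardware catches up with the head (see
 * hns_roce_cmq_csq_done() below); completed descriptors carry a retval
 * that hns_roce_cmd_err_convert_errno() translates into an errno.
 */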

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                          enum hns_roce_opcode_type opcode,
                                          bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        return tail == priv->cmq.csq.head;
}

static void update_cmdq_status(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;

        if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
            handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
                hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
}

static int hns_roce_cmd_err_convert_errno(u16 desc_ret)
{
        struct hns_roce_cmd_errcode errcode_table[] = {
                {CMD_EXEC_SUCCESS, 0},
                {CMD_NO_AUTH, -EPERM},
                {CMD_NOT_EXIST, -EOPNOTSUPP},
                {CMD_CRQ_FULL, -EXFULL},
                {CMD_NEXT_ERR, -ENOSR},
                {CMD_NOT_EXEC, -ENOTBLK},
                {CMD_PARA_ERR, -EINVAL},
                {CMD_RESULT_ERR, -ERANGE},
                {CMD_TIMEOUT, -ETIME},
                {CMD_HILINK_ERR, -ENOLINK},
                {CMD_INFO_ILLEGAL, -ENXIO},
                {CMD_INVALID, -EBADR},
        };
        u16 i;

        for (i = 0; i < ARRAY_SIZE(errcode_table); i++)
                if (desc_ret == errcode_table[i].return_status)
                        return errcode_table[i].errno;
        return -EIO;
}

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                               struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        u32 timeout = 0;
        u16 desc_ret;
        u32 tail;
        int ret;
        int i;

        spin_lock_bh(&csq->lock);

        tail = csq->head;

        for (i = 0; i < num; i++) {
                csq->desc[csq->head++] = desc[i];
                if (csq->head == csq->desc_num)
                        csq->head = 0;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);

        do {
                if (hns_roce_cmq_csq_done(hr_dev))
                        break;
                udelay(1);
        } while (++timeout < priv->cmq.tx_timeout);

        if (hns_roce_cmq_csq_done(hr_dev)) {
                ret = 0;
                for (i = 0; i < num; i++) {
                        /* check the result of hardware write back */
                        desc[i] = csq->desc[tail++];
                        if (tail == csq->desc_num)
                                tail = 0;

                        desc_ret = le16_to_cpu(desc[i].retval);
                        if (likely(desc_ret == CMD_EXEC_SUCCESS))
                                continue;

                        dev_err_ratelimited(hr_dev->dev,
                                            "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
                                            desc->opcode, desc_ret);
                        ret = hns_roce_cmd_err_convert_errno(desc_ret);
                }
        } else {
                /* FW/HW reset or incorrect number of desc */
                tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
                dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
                         csq->head, tail);
                csq->head = tail;

                update_cmdq_status(hr_dev);

                ret = -EAGAIN;
        }

        spin_unlock_bh(&csq->lock);

        return ret;
}

static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cmq_desc *desc, int num)
{
        bool busy;
        int ret;

        if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
                return -EIO;

        if (!v2_chk_mbox_is_avail(hr_dev, &busy))
                return busy ? -EBUSY : 0;

        ret = __hns_roce_cmq_send(hr_dev, desc, num);
        if (ret) {
                if (!v2_chk_mbox_is_avail(hr_dev, &busy))
                        return busy ? -EBUSY : 0;
        }

        return ret;
}

static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
                               dma_addr_t base_addr, u8 cmd, unsigned long tag)
{
        struct hns_roce_cmd_mailbox *mbox;
        int ret;

        mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mbox))
                return PTR_ERR(mbox);

        ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
        hns_roce_free_cmd_mailbox(hr_dev, mbox);
        return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_version *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_version *)desc.data;
        hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
        hr_dev->vendor_id = hr_dev->pci_dev->vendor;

        return 0;
}

static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
                                        struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long end;

        hr_dev->dis_db = true;

        dev_warn(hr_dev->dev,
                 "func clear is pending, device in resetting state.\n");
        end = HNS_ROCE_V2_HW_RST_TIMEOUT;
        while (end) {
                if (!ops->get_hw_reset_stat(handle)) {
                        hr_dev->is_reset = true;
                        dev_info(hr_dev->dev,
                                 "func clear success after reset.\n");
                        return;
                }
                msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
                end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
        }

        dev_warn(hr_dev->dev, "func clear failed.\n");
}

static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
                                        struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long end;

        hr_dev->dis_db = true;

        dev_warn(hr_dev->dev,
                 "func clear is pending, device in resetting state.\n");
        end = HNS_ROCE_V2_HW_RST_TIMEOUT;
        while (end) {
                if (ops->ae_dev_reset_cnt(handle) !=
                    hr_dev->reset_cnt) {
                        hr_dev->is_reset = true;
                        dev_info(hr_dev->dev,
                                 "func clear success after sw reset\n");
                        return;
                }
                msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
                end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
        }

        dev_warn(hr_dev->dev,
                 "func clear failed because of unfinished sw reset\n");
}

static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
                                       int flag)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
                hr_dev->dis_db = true;
                hr_dev->is_reset = true;
                dev_info(hr_dev->dev, "func clear success after reset.\n");
                return;
        }

        if (ops->get_hw_reset_stat(handle)) {
                func_clr_hw_resetting_state(hr_dev, handle);
                return;
        }

        if (ops->ae_dev_resetting(handle) &&
            handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
                func_clr_sw_resetting_state(hr_dev, handle);
                return;
        }

        if (retval && !flag)
                dev_warn(hr_dev->dev,
                         "func clear read failed, ret = %d.\n", retval);

        dev_warn(hr_dev->dev, "func clear failed.\n");
}
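
/*
 * Function clear handshake: write a FUNC_CLEAR command for the given
 * function, then poll the same opcode in read mode until firmware sets
 * FUNC_CLEAR_RST_FUN_DONE or the timeout expires; any reset detected while
 * polling is delegated to hns_roce_func_clr_rst_proc() above.
 */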

static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
        bool fclr_write_fail_flag = false;
        struct hns_roce_func_clear *resp;
        struct hns_roce_cmq_desc desc;
        unsigned long end;
        int ret = 0;

        if (check_device_is_in_reset(hr_dev))
                goto out;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
        resp = (struct hns_roce_func_clear *)desc.data;
        resp->rst_funcid_en = cpu_to_le32(vf_id);

        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret) {
                fclr_write_fail_flag = true;
                dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
                        ret);
                goto out;
        }

        msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
        end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
        while (end) {
                if (check_device_is_in_reset(hr_dev))
                        goto out;
                msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
                end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

                hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
                                              true);

                resp->rst_funcid_en = cpu_to_le32(vf_id);
                ret = hns_roce_cmq_send(hr_dev, &desc, 1);
                if (ret)
                        continue;

                if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
                        if (vf_id == 0)
                                hr_dev->is_reset = true;
                        return;
                }
        }

out:
        hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
}

static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
        enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_cmq_req *req_a;

        req_a = (struct hns_roce_cmq_req *)desc[0].data;
        hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
        hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);

        return hns_roce_cmq_send(hr_dev, desc, 2);
}

static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
        int ret;
        int i;

        if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
                return;

        for (i = hr_dev->func_num - 1; i >= 0; i--) {
                __hns_roce_function_clear(hr_dev, i);

                if (i == 0)
                        continue;

                ret = hns_roce_free_vf_resource(hr_dev, i);
                if (ret)
                        ibdev_err(&hr_dev->ib_dev,
                                  "failed to free vf resource, vf_id = %d, ret = %d.\n",
                                  i, ret);
        }
}

static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
                                      false);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                ibdev_err(&hr_dev->ib_dev,
                          "failed to clear extended doorbell info, ret = %d.\n",
                          ret);

        return ret;
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_fw_info *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_fw_info *)desc.data;
        hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

        return 0;
}

static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        int ret;

        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
                hr_dev->func_num = 1;
                return 0;
        }

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
                                      true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret) {
                hr_dev->func_num = 1;
                return ret;
        }

        hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
        hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);

        return 0;
}

static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
                                        u64 *stats, u32 port, int *num_counters)
{
#define CNT_PER_DESC 3
        struct hns_roce_cmq_desc *desc;
        int bd_idx, cnt_idx;
        __le64 *cnt_data;
        int desc_num;
        int ret;
        int i;

        if (port > hr_dev->caps.num_ports)
                return -EINVAL;

        desc_num = DIV_ROUND_UP(HNS_ROCE_HW_CNT_TOTAL, CNT_PER_DESC);
        desc = kcalloc(desc_num, sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        for (i = 0; i < desc_num; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_QUERY_COUNTER, true);
                if (i != desc_num - 1)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        ret = hns_roce_cmq_send(hr_dev, desc, desc_num);
        if (ret) {
                ibdev_err(&hr_dev->ib_dev,
                          "failed to get counter, ret = %d.\n", ret);
                goto err_out;
        }

        for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
                bd_idx = i / CNT_PER_DESC;
                if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) &&
                    bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC)
                        break;

                cnt_data = (__le64 *)&desc[bd_idx].data[0];
                cnt_idx = i % CNT_PER_DESC;
                stats[i] = le64_to_cpu(cnt_data[cnt_idx]);
        }
        *num_counters = i;

err_out:
        kfree(desc);
        return ret;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
        u32 clock_cycles_of_1us;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);

        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
                clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
        else
                clock_cycles_of_1us = HNS_ROCE_1US_CFG;

        hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
        hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
        struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
        struct hns_roce_caps *caps = &hr_dev->caps;
        enum hns_roce_opcode_type opcode;
        u32 func_num;
        int ret;

        if (is_vf) {
                opcode = HNS_ROCE_OPC_QUERY_VF_RES;
                func_num = 1;
        } else {
                opcode = HNS_ROCE_OPC_QUERY_PF_RES;
                func_num = hr_dev->func_num;
        }

        hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);

        ret = hns_roce_cmq_send(hr_dev, desc, 2);
        if (ret)
                return ret;

        caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
        caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
        caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
        caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
        caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
        caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
        caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
        caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;

        if (is_vf) {
                caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
                caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
                                   func_num;
        } else {
                caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
                caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
                                   func_num;
        }

        return 0;
}

static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
        struct hns_roce_caps *caps = &hr_dev->caps;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
                                      true);

        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
        caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);

        return 0;
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
        struct device *dev = hr_dev->dev;
        int ret;

        ret = load_func_res_caps(hr_dev, false);
        if (ret) {
                dev_err(dev, "failed to load pf res caps, ret = %d.\n", ret);
                return ret;
        }

        ret = load_pf_timer_res_caps(hr_dev);
        if (ret)
                dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
                        ret);

        return ret;
}

static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
        struct device *dev = hr_dev->dev;
        int ret;

        ret = load_func_res_caps(hr_dev, true);
        if (ret)
                dev_err(dev, "failed to load vf res caps, ret = %d.\n", ret);

        return ret;
}

static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
                                          u32 vf_id)
{
        struct hns_roce_vf_switch *swt;
        struct hns_roce_cmq_desc desc;
        int ret;

        swt = (struct hns_roce_vf_switch *)desc.data;
        hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
        swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
        hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
        desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
        hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
        hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
        hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
{
        u32 vf_id;
        int ret;

        for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
                ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
                if (ret)
                        return ret;
        }
        return 0;
}
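
/*
 * HEM resources are partitioned evenly across functions: each function's
 * base-table slice starts at vf_id times the per-function count queried in
 * load_func_res_caps(). For example, VF 2 with qpc_bt_num = 64 gets QPC BT
 * indexes 128..191.
 */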

static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
        struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
        enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
        struct hns_roce_caps *caps = &hr_dev->caps;

        hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);

        hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);

        hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
        hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
        hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
        hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
        hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
        hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
                hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
                hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
                             vf_id * caps->gmv_bt_num);
        } else {
                hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
                hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
                             vf_id * caps->sgid_bt_num);
                hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
                hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
                             vf_id * caps->smac_bt_num);
        }

        return hns_roce_cmq_send(hr_dev, desc, 2);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
        u32 func_num = max_t(u32, 1, hr_dev->func_num);
        u32 vf_id;
        int ret;

        for (vf_id = 0; vf_id < func_num; vf_id++) {
                ret = config_vf_hem_resource(hr_dev, vf_id);
                if (ret) {
                        dev_err(hr_dev->dev,
                                "failed to config vf-%u hem res, ret = %d.\n",
                                vf_id, ret);
                        return ret;
                }
        }

        return 0;
}

static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
        struct hns_roce_caps *caps = &hr_dev->caps;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);

        hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
                     caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
                     caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
                     to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));

        hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
                     caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
                     caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
                     to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));

        hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
                     caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
                     caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
                     to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));

        hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
                     caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
                     caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
                     to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));

        hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
                     caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
                     caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
        hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
                     to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}
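
/*
 * calc_pg_sz() below works out how many objects one context BT entry can
 * cover and sizes the page shift so that obj_num objects fit. Roughly,
 * assuming 4 KiB chunks and 8-byte BAs (BA_BYTE_LEN == 8), hop_num = 2
 * gives obj_per_chunk = ctx_bt_num * 512 * 512 * (4096 / obj_size), and
 * the resulting *_page_size is ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk)),
 * i.e. the number of extra page-size doublings needed.
 */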
1907
1908 hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
1909 caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1910 hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
1911 caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1912 hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
1913 to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
1914
1915 hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
1916 caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1917 hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
1918 caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1919 hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
1920 to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
1921
1922 hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
1923 caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1924 hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
1925 caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1926 hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
1927 to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
1928
1929 return hns_roce_cmq_send(hr_dev, &desc, 1);
1930 }
1931
1932 static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
1933 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
1934 {
1935 u64 obj_per_chunk;
1936 u64 bt_chunk_size = PAGE_SIZE;
1937 u64 buf_chunk_size = PAGE_SIZE;
1938 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
1939
1940 *buf_page_size = 0;
1941 *bt_page_size = 0;
1942
1943 switch (hop_num) {
1944 case 3:
1945 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1946 (bt_chunk_size / BA_BYTE_LEN) *
1947 (bt_chunk_size / BA_BYTE_LEN) *
1948 obj_per_chunk_default;
1949 break;
1950 case 2:
1951 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1952 (bt_chunk_size / BA_BYTE_LEN) *
1953 obj_per_chunk_default;
1954 break;
1955 case 1:
1956 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1957 obj_per_chunk_default;
1958 break;
1959 case HNS_ROCE_HOP_NUM_0:
1960 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
1961 break;
1962 default:
1963 pr_err("table %u does not support hop_num = %u!\n", hem_type,
1964 hop_num);
1965 return;
1966 }
1967
1968 if (hem_type >= HEM_TYPE_MTT)
1969 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1970 else
1971 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1972 }
1973
1974 static void set_hem_page_size(struct hns_roce_dev *hr_dev)
1975 {
1976 struct hns_roce_caps *caps = &hr_dev->caps;
1977
1978 /* EQ */
1979 caps->eqe_ba_pg_sz = 0;
1980 caps->eqe_buf_pg_sz = 0;
1981
1982 /* Link Table */
1983 caps->llm_buf_pg_sz = 0;
1984
1985 /* MR */
1986 caps->mpt_ba_pg_sz = 0;
1987 caps->mpt_buf_pg_sz = 0;
1988 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
1989 caps->pbl_buf_pg_sz = 0;
1990 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
1991 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
1992 HEM_TYPE_MTPT);
1993
1994 /* QP */
1995 caps->qpc_ba_pg_sz = 0;
1996 caps->qpc_buf_pg_sz = 0;
1997 caps->qpc_timer_ba_pg_sz = 0;
1998 caps->qpc_timer_buf_pg_sz = 0;
1999 caps->sccc_ba_pg_sz = 0;
2000 caps->sccc_buf_pg_sz = 0;
2001 caps->mtt_ba_pg_sz = 0;
2002 caps->mtt_buf_pg_sz = 0;
2003 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2004 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2005 HEM_TYPE_QPC);
2006
2007 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
2008 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
2009 caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
2010 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2011
2012 /* CQ */
2013 caps->cqc_ba_pg_sz = 0;
2014 caps->cqc_buf_pg_sz = 0;
2015 caps->cqc_timer_ba_pg_sz = 0;
2016 caps->cqc_timer_buf_pg_sz = 0;
2017 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
2018 caps->cqe_buf_pg_sz = 0;
2019 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2020 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2021 HEM_TYPE_CQC);
2022 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
2023 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2024
2025 /* SRQ */
2026 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2027 caps->srqc_ba_pg_sz = 0;
2028 caps->srqc_buf_pg_sz = 0;
2029 caps->srqwqe_ba_pg_sz = 0;
2030 caps->srqwqe_buf_pg_sz = 0;
2031 caps->idx_ba_pg_sz = 0;
2032 caps->idx_buf_pg_sz = 0;
2033 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
2034 caps->srqc_hop_num, caps->srqc_bt_num,
2035 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
2036 HEM_TYPE_SRQC);
2037 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2038 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2039 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2040 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
2041 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
2042 &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2043 }
2044
2045 /* GMV */
2046 caps->gmv_ba_pg_sz = 0;
2047 caps->gmv_buf_pg_sz = 0;
2048 }
2049
2050 /* Apply all loaded caps before setting to hardware */
2051 static void apply_func_caps(struct hns_roce_dev *hr_dev)
2052 {
2053 struct hns_roce_caps *caps = &hr_dev->caps;
2054 struct hns_roce_v2_priv *priv = hr_dev->priv;
2055
2056 /* The following configurations don't need to be obtained from firmware. */
2057 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2058 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2059 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2060
2061 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2062 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2063 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2064
2065 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2066 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2067
2068 if (!caps->num_comp_vectors)
2069 caps->num_comp_vectors =
2070 min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
2071 (u32)priv->handle->rinfo.num_vectors -
2072 (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
2073
2074 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2075 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
2076 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2077 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2078
2079 /* The following configurations will be overwritten */
2080 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2081 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2082 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2083
2084 /* The following configurations are not obtained from firmware */
2085 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2086
2087 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2088 caps->gid_table_len[0] = caps->gmv_bt_num *
2089 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2090
2091 caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
2092 caps->gmv_entry_sz);
2093 } else {
2094 u32 func_num = max_t(u32, 1, hr_dev->func_num);
2095
2096 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
2097 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2098 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2099 caps->gid_table_len[0] /= func_num;
2100 }
2101
2102 if (hr_dev->is_vf) {
2103 caps->default_aeq_arm_st = 0x3;
2104 caps->default_ceq_arm_st = 0x3;
2105 caps->default_ceq_max_cnt = 0x1;
2106 caps->default_ceq_period = 0x10;
2107 caps->default_aeq_max_cnt = 0x1;
2108 caps->default_aeq_period = 0x10;
2109 }
2110
2111 set_hem_page_size(hr_dev);
2112
} 2113 2114 static int hns_roce_query_caps(struct hns_roce_dev *hr_dev) 2115 { 2116 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM]; 2117 struct hns_roce_caps *caps = &hr_dev->caps; 2118 struct hns_roce_query_pf_caps_a *resp_a; 2119 struct hns_roce_query_pf_caps_b *resp_b; 2120 struct hns_roce_query_pf_caps_c *resp_c; 2121 struct hns_roce_query_pf_caps_d *resp_d; 2122 struct hns_roce_query_pf_caps_e *resp_e; 2123 enum hns_roce_opcode_type cmd; 2124 int ctx_hop_num; 2125 int pbl_hop_num; 2126 int ret; 2127 int i; 2128 2129 cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM : 2130 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM; 2131 2132 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) { 2133 hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true); 2134 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1)) 2135 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 2136 else 2137 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 2138 } 2139 2140 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM); 2141 if (ret) 2142 return ret; 2143 2144 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data; 2145 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data; 2146 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data; 2147 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data; 2148 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data; 2149 2150 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay; 2151 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg); 2152 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); 2153 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); 2154 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); 2155 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); 2156 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); 2157 caps->num_aeq_vectors = resp_a->num_aeq_vectors; 2158 caps->num_other_vectors = resp_a->num_other_vectors; 2159 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; 2160 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; 2161 2162 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; 2163 caps->irrl_entry_sz = resp_b->irrl_entry_sz; 2164 caps->trrl_entry_sz = resp_b->trrl_entry_sz; 2165 caps->cqc_entry_sz = resp_b->cqc_entry_sz; 2166 caps->srqc_entry_sz = resp_b->srqc_entry_sz; 2167 caps->idx_entry_sz = resp_b->idx_entry_sz; 2168 caps->sccc_sz = resp_b->sccc_sz; 2169 caps->max_mtu = resp_b->max_mtu; 2170 caps->min_cqes = resp_b->min_cqes; 2171 caps->min_wqes = resp_b->min_wqes; 2172 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap); 2173 caps->pkey_table_len[0] = resp_b->pkey_table_len; 2174 caps->phy_num_uars = resp_b->phy_num_uars; 2175 ctx_hop_num = resp_b->ctx_hop_num; 2176 pbl_hop_num = resp_b->pbl_hop_num; 2177 2178 caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS); 2179 2180 caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS); 2181 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) << 2182 HNS_ROCE_CAP_FLAGS_EX_SHIFT; 2183 2184 caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS); 2185 caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID); 2186 caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH); 2187 caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS); 2188 caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS); 2189 caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS); 2190 caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD); 2191 caps->max_qp_dest_rdma = caps->max_qp_init_rdma; 2192 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); 2193 2194 
caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS); 2195 caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE); 2196 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); 2197 caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); 2198 caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); 2199 caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); 2200 caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); 2201 caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); 2202 caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); 2203 caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS); 2204 2205 caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS); 2206 caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT); 2207 caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS); 2208 caps->reserved_xrcds = hr_reg_read(resp_e, PF_CAPS_E_RSV_XRCDS); 2209 caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS); 2210 caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS); 2211 2212 caps->qpc_hop_num = ctx_hop_num; 2213 caps->sccc_hop_num = ctx_hop_num; 2214 caps->srqc_hop_num = ctx_hop_num; 2215 caps->cqc_hop_num = ctx_hop_num; 2216 caps->mpt_hop_num = ctx_hop_num; 2217 caps->mtt_hop_num = pbl_hop_num; 2218 caps->cqe_hop_num = pbl_hop_num; 2219 caps->srqwqe_hop_num = pbl_hop_num; 2220 caps->idx_hop_num = pbl_hop_num; 2221 caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM); 2222 caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM); 2223 caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM); 2224 2225 if (!(caps->page_size_cap & PAGE_SIZE)) 2226 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; 2227 2228 if (!hr_dev->is_vf) { 2229 caps->cqe_sz = resp_a->cqe_sz; 2230 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz); 2231 caps->default_aeq_arm_st = 2232 hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST); 2233 caps->default_ceq_arm_st = 2234 hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST); 2235 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); 2236 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); 2237 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); 2238 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period); 2239 } 2240 2241 return 0; 2242 } 2243 2244 static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val) 2245 { 2246 struct hns_roce_cmq_desc desc; 2247 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data; 2248 2249 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE, 2250 false); 2251 2252 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type); 2253 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val); 2254 2255 return hns_roce_cmq_send(hr_dev, &desc, 1); 2256 } 2257 2258 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev) 2259 { 2260 struct hns_roce_caps *caps = &hr_dev->caps; 2261 int ret; 2262 2263 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) 2264 return 0; 2265 2266 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE, 2267 caps->qpc_sz); 2268 if (ret) { 2269 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret); 2270 return ret; 2271 } 2272 2273 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE, 2274 caps->sccc_sz); 2275 if (ret) 2276 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret); 2277 2278 return ret; 2279 } 2280 2281 static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev) 2282 { 2283 struct 
device *dev = hr_dev->dev; 2284 int ret; 2285 2286 hr_dev->func_num = 1; 2287 2288 ret = hns_roce_query_caps(hr_dev); 2289 if (ret) { 2290 dev_err(dev, "failed to query VF caps, ret = %d.\n", ret); 2291 return ret; 2292 } 2293 2294 ret = hns_roce_query_vf_resource(hr_dev); 2295 if (ret) { 2296 dev_err(dev, "failed to query VF resource, ret = %d.\n", ret); 2297 return ret; 2298 } 2299 2300 apply_func_caps(hr_dev); 2301 2302 ret = hns_roce_v2_set_bt(hr_dev); 2303 if (ret) 2304 dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret); 2305 2306 return ret; 2307 } 2308 2309 static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev) 2310 { 2311 struct device *dev = hr_dev->dev; 2312 int ret; 2313 2314 ret = hns_roce_query_func_info(hr_dev); 2315 if (ret) { 2316 dev_err(dev, "failed to query func info, ret = %d.\n", ret); 2317 return ret; 2318 } 2319 2320 ret = hns_roce_config_global_param(hr_dev); 2321 if (ret) { 2322 dev_err(dev, "failed to config global param, ret = %d.\n", ret); 2323 return ret; 2324 } 2325 2326 ret = hns_roce_set_vf_switch_param(hr_dev); 2327 if (ret) { 2328 dev_err(dev, "failed to set switch param, ret = %d.\n", ret); 2329 return ret; 2330 } 2331 2332 ret = hns_roce_query_caps(hr_dev); 2333 if (ret) { 2334 dev_err(dev, "failed to query PF caps, ret = %d.\n", ret); 2335 return ret; 2336 } 2337 2338 ret = hns_roce_query_pf_resource(hr_dev); 2339 if (ret) { 2340 dev_err(dev, "failed to query pf resource, ret = %d.\n", ret); 2341 return ret; 2342 } 2343 2344 apply_func_caps(hr_dev); 2345 2346 ret = hns_roce_alloc_vf_resource(hr_dev); 2347 if (ret) { 2348 dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret); 2349 return ret; 2350 } 2351 2352 ret = hns_roce_v2_set_bt(hr_dev); 2353 if (ret) { 2354 dev_err(dev, "failed to config BA table, ret = %d.\n", ret); 2355 return ret; 2356 } 2357 2358 /* Configure the size of QPC, SCCC, etc. 
*/ 2359 return hns_roce_config_entry_size(hr_dev); 2360 } 2361 2362 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) 2363 { 2364 struct device *dev = hr_dev->dev; 2365 int ret; 2366 2367 ret = hns_roce_cmq_query_hw_info(hr_dev); 2368 if (ret) { 2369 dev_err(dev, "failed to query hardware info, ret = %d.\n", ret); 2370 return ret; 2371 } 2372 2373 ret = hns_roce_query_fw_ver(hr_dev); 2374 if (ret) { 2375 dev_err(dev, "failed to query firmware info, ret = %d.\n", ret); 2376 return ret; 2377 } 2378 2379 hr_dev->vendor_part_id = hr_dev->pci_dev->device; 2380 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid); 2381 2382 if (hr_dev->is_vf) 2383 return hns_roce_v2_vf_profile(hr_dev); 2384 else 2385 return hns_roce_v2_pf_profile(hr_dev); 2386 } 2387 2388 static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf) 2389 { 2390 u32 i, next_ptr, page_num; 2391 __le64 *entry = cfg_buf; 2392 dma_addr_t addr; 2393 u64 val; 2394 2395 page_num = data_buf->npages; 2396 for (i = 0; i < page_num; i++) { 2397 addr = hns_roce_buf_page(data_buf, i); 2398 if (i == (page_num - 1)) 2399 next_ptr = 0; 2400 else 2401 next_ptr = i + 1; 2402 2403 val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr); 2404 entry[i] = cpu_to_le64(val); 2405 } 2406 } 2407 2408 static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev, 2409 struct hns_roce_link_table *table) 2410 { 2411 struct hns_roce_cmq_desc desc[2]; 2412 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data; 2413 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data; 2414 struct hns_roce_buf *buf = table->buf; 2415 enum hns_roce_opcode_type opcode; 2416 dma_addr_t addr; 2417 2418 opcode = HNS_ROCE_OPC_CFG_EXT_LLM; 2419 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false); 2420 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 2421 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false); 2422 2423 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map)); 2424 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map)); 2425 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages); 2426 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift)); 2427 hr_reg_enable(r_a, CFG_LLM_A_INIT_EN); 2428 2429 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0)); 2430 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr)); 2431 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr)); 2432 hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1); 2433 hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0); 2434 2435 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1)); 2436 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr)); 2437 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr)); 2438 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1); 2439 2440 return hns_roce_cmq_send(hr_dev, desc, 2); 2441 } 2442 2443 static struct hns_roce_link_table * 2444 alloc_link_table_buf(struct hns_roce_dev *hr_dev) 2445 { 2446 struct hns_roce_v2_priv *priv = hr_dev->priv; 2447 struct hns_roce_link_table *link_tbl; 2448 u32 pg_shift, size, min_size; 2449 2450 link_tbl = &priv->ext_llm; 2451 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT; 2452 size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ; 2453 min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift; 2454 2455 /* Alloc data table */ 2456 size = max(size, min_size); 2457 link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0); 2458 if (IS_ERR(link_tbl->buf)) 2459 return ERR_PTR(-ENOMEM); 2460 2461 /* Alloc 
config table */ 2462 size = link_tbl->buf->npages * sizeof(u64); 2463 link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size, 2464 &link_tbl->table.map, 2465 GFP_KERNEL); 2466 if (!link_tbl->table.buf) { 2467 hns_roce_buf_free(hr_dev, link_tbl->buf); 2468 return ERR_PTR(-ENOMEM); 2469 } 2470 2471 return link_tbl; 2472 } 2473 2474 static void free_link_table_buf(struct hns_roce_dev *hr_dev, 2475 struct hns_roce_link_table *tbl) 2476 { 2477 if (tbl->buf) { 2478 u32 size = tbl->buf->npages * sizeof(u64); 2479 2480 dma_free_coherent(hr_dev->dev, size, tbl->table.buf, 2481 tbl->table.map); 2482 } 2483 2484 hns_roce_buf_free(hr_dev, tbl->buf); 2485 } 2486 2487 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev) 2488 { 2489 struct hns_roce_link_table *link_tbl; 2490 int ret; 2491 2492 link_tbl = alloc_link_table_buf(hr_dev); 2493 if (IS_ERR(link_tbl)) 2494 return -ENOMEM; 2495 2496 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) { 2497 ret = -EINVAL; 2498 goto err_alloc; 2499 } 2500 2501 config_llm_table(link_tbl->buf, link_tbl->table.buf); 2502 ret = set_llm_cfg_to_hw(hr_dev, link_tbl); 2503 if (ret) 2504 goto err_alloc; 2505 2506 return 0; 2507 2508 err_alloc: 2509 free_link_table_buf(hr_dev, link_tbl); 2510 return ret; 2511 } 2512 2513 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev) 2514 { 2515 struct hns_roce_v2_priv *priv = hr_dev->priv; 2516 2517 free_link_table_buf(hr_dev, &priv->ext_llm); 2518 } 2519 2520 static void free_dip_list(struct hns_roce_dev *hr_dev) 2521 { 2522 struct hns_roce_dip *hr_dip; 2523 struct hns_roce_dip *tmp; 2524 unsigned long flags; 2525 2526 spin_lock_irqsave(&hr_dev->dip_list_lock, flags); 2527 2528 list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) { 2529 list_del(&hr_dip->node); 2530 kfree(hr_dip); 2531 } 2532 2533 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); 2534 } 2535 2536 static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev) 2537 { 2538 struct hns_roce_v2_priv *priv = hr_dev->priv; 2539 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2540 struct ib_device *ibdev = &hr_dev->ib_dev; 2541 struct hns_roce_pd *hr_pd; 2542 struct ib_pd *pd; 2543 2544 hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL); 2545 if (ZERO_OR_NULL_PTR(hr_pd)) 2546 return NULL; 2547 pd = &hr_pd->ibpd; 2548 pd->device = ibdev; 2549 2550 if (hns_roce_alloc_pd(pd, NULL)) { 2551 ibdev_err(ibdev, "failed to create pd for free mr.\n"); 2552 kfree(hr_pd); 2553 return NULL; 2554 } 2555 free_mr->rsv_pd = to_hr_pd(pd); 2556 free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev; 2557 free_mr->rsv_pd->ibpd.uobject = NULL; 2558 free_mr->rsv_pd->ibpd.__internal_mr = NULL; 2559 atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0); 2560 2561 return pd; 2562 } 2563 2564 static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev) 2565 { 2566 struct hns_roce_v2_priv *priv = hr_dev->priv; 2567 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2568 struct ib_device *ibdev = &hr_dev->ib_dev; 2569 struct ib_cq_init_attr cq_init_attr = {}; 2570 struct hns_roce_cq *hr_cq; 2571 struct ib_cq *cq; 2572 2573 cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM; 2574 2575 hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL); 2576 if (ZERO_OR_NULL_PTR(hr_cq)) 2577 return NULL; 2578 2579 cq = &hr_cq->ib_cq; 2580 cq->device = ibdev; 2581 2582 if (hns_roce_create_cq(cq, &cq_init_attr, NULL)) { 2583 ibdev_err(ibdev, "failed to create cq for free mr.\n"); 2584 kfree(hr_cq); 2585 return NULL; 2586 } 2587 free_mr->rsv_cq = to_hr_cq(cq); 2588 
free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev; 2589 free_mr->rsv_cq->ib_cq.uobject = NULL; 2590 free_mr->rsv_cq->ib_cq.comp_handler = NULL; 2591 free_mr->rsv_cq->ib_cq.event_handler = NULL; 2592 free_mr->rsv_cq->ib_cq.cq_context = NULL; 2593 atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0); 2594 2595 return cq; 2596 } 2597 2598 static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq, 2599 struct ib_qp_init_attr *init_attr, int i) 2600 { 2601 struct hns_roce_v2_priv *priv = hr_dev->priv; 2602 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2603 struct ib_device *ibdev = &hr_dev->ib_dev; 2604 struct hns_roce_qp *hr_qp; 2605 struct ib_qp *qp; 2606 int ret; 2607 2608 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); 2609 if (ZERO_OR_NULL_PTR(hr_qp)) 2610 return -ENOMEM; 2611 2612 qp = &hr_qp->ibqp; 2613 qp->device = ibdev; 2614 2615 ret = hns_roce_create_qp(qp, init_attr, NULL); 2616 if (ret) { 2617 ibdev_err(ibdev, "failed to create qp for free mr.\n"); 2618 kfree(hr_qp); 2619 return ret; 2620 } 2621 2622 free_mr->rsv_qp[i] = hr_qp; 2623 free_mr->rsv_qp[i]->ibqp.recv_cq = cq; 2624 free_mr->rsv_qp[i]->ibqp.send_cq = cq; 2625 2626 return 0; 2627 } 2628 2629 static void free_mr_exit(struct hns_roce_dev *hr_dev) 2630 { 2631 struct hns_roce_v2_priv *priv = hr_dev->priv; 2632 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2633 struct ib_qp *qp; 2634 int i; 2635 2636 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { 2637 if (free_mr->rsv_qp[i]) { 2638 qp = &free_mr->rsv_qp[i]->ibqp; 2639 hns_roce_v2_destroy_qp(qp, NULL); 2640 kfree(free_mr->rsv_qp[i]); 2641 free_mr->rsv_qp[i] = NULL; 2642 } 2643 } 2644 2645 if (free_mr->rsv_cq) { 2646 hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL); 2647 kfree(free_mr->rsv_cq); 2648 free_mr->rsv_cq = NULL; 2649 } 2650 2651 if (free_mr->rsv_pd) { 2652 hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL); 2653 kfree(free_mr->rsv_pd); 2654 free_mr->rsv_pd = NULL; 2655 } 2656 } 2657 2658 static int free_mr_alloc_res(struct hns_roce_dev *hr_dev) 2659 { 2660 struct hns_roce_v2_priv *priv = hr_dev->priv; 2661 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2662 struct ib_qp_init_attr qp_init_attr = {}; 2663 struct ib_pd *pd; 2664 struct ib_cq *cq; 2665 int ret; 2666 int i; 2667 2668 pd = free_mr_init_pd(hr_dev); 2669 if (!pd) 2670 return -ENOMEM; 2671 2672 cq = free_mr_init_cq(hr_dev); 2673 if (!cq) { 2674 ret = -ENOMEM; 2675 goto create_failed_cq; 2676 } 2677 2678 qp_init_attr.qp_type = IB_QPT_RC; 2679 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 2680 qp_init_attr.send_cq = cq; 2681 qp_init_attr.recv_cq = cq; 2682 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { 2683 qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM; 2684 qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM; 2685 qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM; 2686 qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM; 2687 2688 ret = free_mr_init_qp(hr_dev, cq, &qp_init_attr, i); 2689 if (ret) 2690 goto create_failed_qp; 2691 } 2692 2693 return 0; 2694 2695 create_failed_qp: 2696 for (i--; i >= 0; i--) { 2697 hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL); 2698 kfree(free_mr->rsv_qp[i]); 2699 } 2700 hns_roce_destroy_cq(cq, NULL); 2701 kfree(cq); 2702 2703 create_failed_cq: 2704 hns_roce_dealloc_pd(pd, NULL); 2705 kfree(pd); 2706 2707 return ret; 2708 } 2709 2710 static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, 2711 struct ib_qp_attr *attr, int sl_num) 2712 { 2713 struct hns_roce_v2_priv *priv = 
hr_dev->priv;
2714 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2715 struct ib_device *ibdev = &hr_dev->ib_dev;
2716 struct hns_roce_qp *hr_qp;
2717 int loopback;
2718 int mask;
2719 int ret;
2720
2721 hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp);
2722 hr_qp->free_mr_en = 1;
2723 hr_qp->ibqp.device = ibdev;
2724 hr_qp->ibqp.qp_type = IB_QPT_RC;
2725
2726 mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
2727 attr->qp_state = IB_QPS_INIT;
2728 attr->port_num = 1;
2729 attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
2730 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2731 IB_QPS_INIT, NULL);
2732 if (ret) {
2733 ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
2734 ret);
2735 return ret;
2736 }
2737
2738 loopback = hr_dev->loop_idc;
2739 /* Set qpc lbi = 1 to indicate loopback IO */
2740 hr_dev->loop_idc = 1;
2741
2742 mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
2743 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
2744 attr->qp_state = IB_QPS_RTR;
2745 attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2746 attr->path_mtu = IB_MTU_256;
2747 attr->dest_qp_num = hr_qp->qpn;
2748 attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2749
2750 rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
2751
2752 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
2753 IB_QPS_RTR, NULL);
2754 hr_dev->loop_idc = loopback;
2755 if (ret) {
2756 ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
2757 ret);
2758 return ret;
2759 }
2760
2761 mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
2762 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
2763 attr->qp_state = IB_QPS_RTS;
2764 attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2765 attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
2766 attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
2767 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
2768 IB_QPS_RTS, NULL);
2769 if (ret)
2770 ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
2771 ret);
2772
2773 return ret;
2774 }
2775
2776 static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
2777 {
2778 struct hns_roce_v2_priv *priv = hr_dev->priv;
2779 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2780 struct ib_qp_attr attr = {};
2781 int ret;
2782 int i;
2783
2784 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
2785 rdma_ah_set_static_rate(&attr.ah_attr, 3);
2786 rdma_ah_set_port_num(&attr.ah_attr, 1);
2787
2788 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2789 ret = free_mr_modify_rsv_qp(hr_dev, &attr, i);
2790 if (ret)
2791 return ret;
2792 }
2793
2794 return 0;
2795 }
2796
2797 static int free_mr_init(struct hns_roce_dev *hr_dev)
2798 {
2799 struct hns_roce_v2_priv *priv = hr_dev->priv;
2800 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2801 int ret;
2802
2803 mutex_init(&free_mr->mutex);
2804
2805 ret = free_mr_alloc_res(hr_dev);
2806 if (ret)
2807 return ret;
2808
2809 ret = free_mr_modify_qp(hr_dev);
2810 if (ret)
2811 goto err_modify_qp;
2812
2813 return 0;
2814
2815 err_modify_qp:
2816 free_mr_exit(hr_dev);
2817
2818 return ret;
2819 }
2820
2821 static int get_hem_table(struct hns_roce_dev *hr_dev)
2822 {
2823 unsigned int qpc_count;
2824 unsigned int cqc_count;
2825 unsigned int gmv_count;
2826 int ret;
2827 int i;
2828
2829 /* Alloc memory for source address table buffer space chunk */
2830 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2831 gmv_count++) {
2832 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2833
if (ret) 2834 goto err_gmv_failed; 2835 } 2836 2837 if (hr_dev->is_vf) 2838 return 0; 2839 2840 /* Alloc memory for QPC Timer buffer space chunk */ 2841 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; 2842 qpc_count++) { 2843 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table, 2844 qpc_count); 2845 if (ret) { 2846 dev_err(hr_dev->dev, "QPC Timer get failed\n"); 2847 goto err_qpc_timer_failed; 2848 } 2849 } 2850 2851 /* Alloc memory for CQC Timer buffer space chunk */ 2852 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; 2853 cqc_count++) { 2854 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table, 2855 cqc_count); 2856 if (ret) { 2857 dev_err(hr_dev->dev, "CQC Timer get failed\n"); 2858 goto err_cqc_timer_failed; 2859 } 2860 } 2861 2862 return 0; 2863 2864 err_cqc_timer_failed: 2865 for (i = 0; i < cqc_count; i++) 2866 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i); 2867 2868 err_qpc_timer_failed: 2869 for (i = 0; i < qpc_count; i++) 2870 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i); 2871 2872 err_gmv_failed: 2873 for (i = 0; i < gmv_count; i++) 2874 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i); 2875 2876 return ret; 2877 } 2878 2879 static void put_hem_table(struct hns_roce_dev *hr_dev) 2880 { 2881 int i; 2882 2883 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++) 2884 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i); 2885 2886 if (hr_dev->is_vf) 2887 return; 2888 2889 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++) 2890 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i); 2891 2892 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++) 2893 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i); 2894 } 2895 2896 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) 2897 { 2898 int ret; 2899 2900 /* The hns ROCEE requires the extdb info to be cleared before using */ 2901 ret = hns_roce_clear_extdb_list_info(hr_dev); 2902 if (ret) 2903 return ret; 2904 2905 ret = get_hem_table(hr_dev); 2906 if (ret) 2907 return ret; 2908 2909 if (hr_dev->is_vf) 2910 return 0; 2911 2912 ret = hns_roce_init_link_table(hr_dev); 2913 if (ret) { 2914 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret); 2915 goto err_llm_init_failed; 2916 } 2917 2918 return 0; 2919 2920 err_llm_init_failed: 2921 put_hem_table(hr_dev); 2922 2923 return ret; 2924 } 2925 2926 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) 2927 { 2928 hns_roce_function_clear(hr_dev); 2929 2930 if (!hr_dev->is_vf) 2931 hns_roce_free_link_table(hr_dev); 2932 2933 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09) 2934 free_dip_list(hr_dev); 2935 } 2936 2937 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, 2938 struct hns_roce_mbox_msg *mbox_msg) 2939 { 2940 struct hns_roce_cmq_desc desc; 2941 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data; 2942 2943 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); 2944 2945 mb->in_param_l = cpu_to_le32(mbox_msg->in_param); 2946 mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32); 2947 mb->out_param_l = cpu_to_le32(mbox_msg->out_param); 2948 mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32); 2949 mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd); 2950 mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 | 2951 mbox_msg->token); 2952 2953 return hns_roce_cmq_send(hr_dev, &desc, 1); 2954 } 2955 2956 static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, 2957 u8 *complete_status) 2958 { 2959 struct hns_roce_mbox_status *mb_st; 
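/*
 * Note (editorial): mb_st points at the QUERY_MB_ST response carried in
 * desc.data. The loop below re-issues HNS_ROCE_OPC_QUERY_MB_ST until the
 * HW_RUN bit clears (no mailbox command is still running), the timeout
 * expires, or the mailbox becomes unavailable; in the unavailable (reset)
 * case the function reports success with MB_ST_COMPLETE_M so that mailbox
 * errors are ignored.
 */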
2960 struct hns_roce_cmq_desc desc; 2961 unsigned long end; 2962 int ret = -EBUSY; 2963 u32 status; 2964 bool busy; 2965 2966 mb_st = (struct hns_roce_mbox_status *)desc.data; 2967 end = msecs_to_jiffies(timeout) + jiffies; 2968 while (v2_chk_mbox_is_avail(hr_dev, &busy)) { 2969 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) 2970 return -EIO; 2971 2972 status = 0; 2973 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, 2974 true); 2975 ret = __hns_roce_cmq_send(hr_dev, &desc, 1); 2976 if (!ret) { 2977 status = le32_to_cpu(mb_st->mb_status_hw_run); 2978 /* No pending message exists in ROCEE mbox. */ 2979 if (!(status & MB_ST_HW_RUN_M)) 2980 break; 2981 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) { 2982 break; 2983 } 2984 2985 if (time_after(jiffies, end)) { 2986 dev_err_ratelimited(hr_dev->dev, 2987 "failed to wait mbox status 0x%x\n", 2988 status); 2989 return -ETIMEDOUT; 2990 } 2991 2992 cond_resched(); 2993 ret = -EBUSY; 2994 } 2995 2996 if (!ret) { 2997 *complete_status = (u8)(status & MB_ST_COMPLETE_M); 2998 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) { 2999 /* Ignore all errors if the mbox is unavailable. */ 3000 ret = 0; 3001 *complete_status = MB_ST_COMPLETE_M; 3002 } 3003 3004 return ret; 3005 } 3006 3007 static int v2_post_mbox(struct hns_roce_dev *hr_dev, 3008 struct hns_roce_mbox_msg *mbox_msg) 3009 { 3010 u8 status = 0; 3011 int ret; 3012 3013 /* Waiting for the mbox to be idle */ 3014 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS, 3015 &status); 3016 if (unlikely(ret)) { 3017 dev_err_ratelimited(hr_dev->dev, 3018 "failed to check post mbox status = 0x%x, ret = %d.\n", 3019 status, ret); 3020 return ret; 3021 } 3022 3023 /* Post new message to mbox */ 3024 ret = hns_roce_mbox_post(hr_dev, mbox_msg); 3025 if (ret) 3026 dev_err_ratelimited(hr_dev->dev, 3027 "failed to post mailbox, ret = %d.\n", ret); 3028 3029 return ret; 3030 } 3031 3032 static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev) 3033 { 3034 u8 status = 0; 3035 int ret; 3036 3037 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS, 3038 &status); 3039 if (!ret) { 3040 if (status != MB_ST_COMPLETE_SUCC) 3041 return -EBUSY; 3042 } else { 3043 dev_err_ratelimited(hr_dev->dev, 3044 "failed to check mbox status = 0x%x, ret = %d.\n", 3045 status, ret); 3046 } 3047 3048 return ret; 3049 } 3050 3051 static void copy_gid(void *dest, const union ib_gid *gid) 3052 { 3053 #define GID_SIZE 4 3054 const union ib_gid *src = gid; 3055 __le32 (*p)[GID_SIZE] = dest; 3056 int i; 3057 3058 if (!gid) 3059 src = &zgid; 3060 3061 for (i = 0; i < GID_SIZE; i++) 3062 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]); 3063 } 3064 3065 static int config_sgid_table(struct hns_roce_dev *hr_dev, 3066 int gid_index, const union ib_gid *gid, 3067 enum hns_roce_sgid_type sgid_type) 3068 { 3069 struct hns_roce_cmq_desc desc; 3070 struct hns_roce_cfg_sgid_tb *sgid_tb = 3071 (struct hns_roce_cfg_sgid_tb *)desc.data; 3072 3073 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false); 3074 3075 hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index); 3076 hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type); 3077 3078 copy_gid(&sgid_tb->vf_sgid_l, gid); 3079 3080 return hns_roce_cmq_send(hr_dev, &desc, 1); 3081 } 3082 3083 static int config_gmv_table(struct hns_roce_dev *hr_dev, 3084 int gid_index, const union ib_gid *gid, 3085 enum hns_roce_sgid_type sgid_type, 3086 const struct ib_gid_attr *attr) 3087 { 3088 struct hns_roce_cmq_desc desc[2]; 3089 struct 
hns_roce_cfg_gmv_tb_a *tb_a = 3090 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data; 3091 struct hns_roce_cfg_gmv_tb_b *tb_b = 3092 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data; 3093 3094 u16 vlan_id = VLAN_CFI_MASK; 3095 u8 mac[ETH_ALEN] = {}; 3096 int ret; 3097 3098 if (gid) { 3099 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac); 3100 if (ret) 3101 return ret; 3102 } 3103 3104 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false); 3105 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 3106 3107 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false); 3108 3109 copy_gid(&tb_a->vf_sgid_l, gid); 3110 3111 hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type); 3112 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK); 3113 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id); 3114 3115 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac); 3116 3117 hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]); 3118 hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index); 3119 3120 return hns_roce_cmq_send(hr_dev, desc, 2); 3121 } 3122 3123 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index, 3124 const union ib_gid *gid, 3125 const struct ib_gid_attr *attr) 3126 { 3127 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1; 3128 int ret; 3129 3130 if (gid) { 3131 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { 3132 if (ipv6_addr_v4mapped((void *)gid)) 3133 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4; 3134 else 3135 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6; 3136 } else if (attr->gid_type == IB_GID_TYPE_ROCE) { 3137 sgid_type = GID_TYPE_FLAG_ROCE_V1; 3138 } 3139 } 3140 3141 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) 3142 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr); 3143 else 3144 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type); 3145 3146 if (ret) 3147 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n", 3148 ret); 3149 3150 return ret; 3151 } 3152 3153 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, 3154 const u8 *addr) 3155 { 3156 struct hns_roce_cmq_desc desc; 3157 struct hns_roce_cfg_smac_tb *smac_tb = 3158 (struct hns_roce_cfg_smac_tb *)desc.data; 3159 u16 reg_smac_h; 3160 u32 reg_smac_l; 3161 3162 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false); 3163 3164 reg_smac_l = *(u32 *)(&addr[0]); 3165 reg_smac_h = *(u16 *)(&addr[4]); 3166 3167 hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port); 3168 hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h); 3169 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); 3170 3171 return hns_roce_cmq_send(hr_dev, &desc, 1); 3172 } 3173 3174 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, 3175 struct hns_roce_v2_mpt_entry *mpt_entry, 3176 struct hns_roce_mr *mr) 3177 { 3178 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 }; 3179 struct ib_device *ibdev = &hr_dev->ib_dev; 3180 dma_addr_t pbl_ba; 3181 int i, count; 3182 3183 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, 3184 min_t(int, ARRAY_SIZE(pages), mr->npages), 3185 &pbl_ba); 3186 if (count < 1) { 3187 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", 3188 count); 3189 return -ENOBUFS; 3190 } 3191 3192 /* Aligned to the hardware address access unit */ 3193 for (i = 0; i < count; i++) 3194 pages[i] >>= 6; 3195 3196 mpt_entry->pbl_size = cpu_to_le32(mr->npages); 3197 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); 3198 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); 3199 3200 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); 
3201 hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0])); 3202 3203 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); 3204 hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1])); 3205 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, 3206 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); 3207 3208 return 0; 3209 } 3210 3211 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, 3212 void *mb_buf, struct hns_roce_mr *mr) 3213 { 3214 struct hns_roce_v2_mpt_entry *mpt_entry; 3215 3216 mpt_entry = mb_buf; 3217 memset(mpt_entry, 0, sizeof(*mpt_entry)); 3218 3219 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); 3220 hr_reg_write(mpt_entry, MPT_PD, mr->pd); 3221 3222 hr_reg_write_bool(mpt_entry, MPT_BIND_EN, 3223 mr->access & IB_ACCESS_MW_BIND); 3224 hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN, 3225 mr->access & IB_ACCESS_REMOTE_ATOMIC); 3226 hr_reg_write_bool(mpt_entry, MPT_RR_EN, 3227 mr->access & IB_ACCESS_REMOTE_READ); 3228 hr_reg_write_bool(mpt_entry, MPT_RW_EN, 3229 mr->access & IB_ACCESS_REMOTE_WRITE); 3230 hr_reg_write_bool(mpt_entry, MPT_LW_EN, 3231 mr->access & IB_ACCESS_LOCAL_WRITE); 3232 3233 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); 3234 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); 3235 mpt_entry->lkey = cpu_to_le32(mr->key); 3236 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova)); 3237 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova)); 3238 3239 if (mr->type != MR_TYPE_MR) 3240 hr_reg_enable(mpt_entry, MPT_PA); 3241 3242 if (mr->type == MR_TYPE_DMA) 3243 return 0; 3244 3245 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0) 3246 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num); 3247 3248 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, 3249 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); 3250 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD); 3251 3252 return set_mtpt_pbl(hr_dev, mpt_entry, mr); 3253 } 3254 3255 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, 3256 struct hns_roce_mr *mr, int flags, 3257 void *mb_buf) 3258 { 3259 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; 3260 u32 mr_access_flags = mr->access; 3261 int ret = 0; 3262 3263 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); 3264 hr_reg_write(mpt_entry, MPT_PD, mr->pd); 3265 3266 if (flags & IB_MR_REREG_ACCESS) { 3267 hr_reg_write(mpt_entry, MPT_BIND_EN, 3268 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0)); 3269 hr_reg_write(mpt_entry, MPT_ATOMIC_EN, 3270 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); 3271 hr_reg_write(mpt_entry, MPT_RR_EN, 3272 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0); 3273 hr_reg_write(mpt_entry, MPT_RW_EN, 3274 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0); 3275 hr_reg_write(mpt_entry, MPT_LW_EN, 3276 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 
1 : 0); 3277 } 3278 3279 if (flags & IB_MR_REREG_TRANS) { 3280 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova)); 3281 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova)); 3282 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); 3283 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); 3284 3285 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); 3286 } 3287 3288 return ret; 3289 } 3290 3291 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, 3292 void *mb_buf, struct hns_roce_mr *mr) 3293 { 3294 struct ib_device *ibdev = &hr_dev->ib_dev; 3295 struct hns_roce_v2_mpt_entry *mpt_entry; 3296 dma_addr_t pbl_ba = 0; 3297 3298 mpt_entry = mb_buf; 3299 memset(mpt_entry, 0, sizeof(*mpt_entry)); 3300 3301 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) { 3302 ibdev_err(ibdev, "failed to find frmr mtr.\n"); 3303 return -ENOBUFS; 3304 } 3305 3306 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); 3307 hr_reg_write(mpt_entry, MPT_PD, mr->pd); 3308 3309 hr_reg_enable(mpt_entry, MPT_RA_EN); 3310 hr_reg_enable(mpt_entry, MPT_R_INV_EN); 3311 3312 hr_reg_enable(mpt_entry, MPT_FRE); 3313 hr_reg_clear(mpt_entry, MPT_MR_MW); 3314 hr_reg_enable(mpt_entry, MPT_BPD); 3315 hr_reg_clear(mpt_entry, MPT_PA); 3316 3317 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1); 3318 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, 3319 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); 3320 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, 3321 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); 3322 3323 mpt_entry->pbl_size = cpu_to_le32(mr->npages); 3324 3325 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3)); 3326 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); 3327 3328 return 0; 3329 } 3330 3331 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) 3332 { 3333 struct hns_roce_v2_mpt_entry *mpt_entry; 3334 3335 mpt_entry = mb_buf; 3336 memset(mpt_entry, 0, sizeof(*mpt_entry)); 3337 3338 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); 3339 hr_reg_write(mpt_entry, MPT_PD, mw->pdn); 3340 3341 hr_reg_enable(mpt_entry, MPT_R_INV_EN); 3342 hr_reg_enable(mpt_entry, MPT_LW_EN); 3343 3344 hr_reg_enable(mpt_entry, MPT_MR_MW); 3345 hr_reg_enable(mpt_entry, MPT_BPD); 3346 hr_reg_clear(mpt_entry, MPT_PA); 3347 hr_reg_write(mpt_entry, MPT_BQP, 3348 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); 3349 3350 mpt_entry->lkey = cpu_to_le32(mw->rkey); 3351 3352 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 3353 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 
0 :
3354 mw->pbl_hop_num);
3355 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3356 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3357 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3358 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3359
3360 return 0;
3361 }
3362
3363 static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
3364 {
3365 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
3366 struct ib_device *ibdev = &hr_dev->ib_dev;
3367 const struct ib_send_wr *bad_wr;
3368 struct ib_rdma_wr rdma_wr = {};
3369 struct ib_send_wr *send_wr;
3370 int ret;
3371
3372 send_wr = &rdma_wr.wr;
3373 send_wr->opcode = IB_WR_RDMA_WRITE;
3374
3375 ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
3376 if (ret) {
3377 ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n",
3378 ret);
3379 return ret;
3380 }
3381
3382 return 0;
3383 }
3384
3385 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3386 struct ib_wc *wc);
3387
3388 static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
3389 {
3390 struct hns_roce_v2_priv *priv = hr_dev->priv;
3391 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
3392 struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
3393 struct ib_device *ibdev = &hr_dev->ib_dev;
3394 struct hns_roce_qp *hr_qp;
3395 unsigned long end;
3396 int cqe_cnt = 0;
3397 int npolled;
3398 int ret;
3399 int i;
3400
3401 /*
3402 * If device initialization is not complete or the device is being
3403 * uninstalled, there is no need to execute the free mr process.
3404 */
3405 if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
3406 priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
3407 hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
3408 return;
3409
3410 mutex_lock(&free_mr->mutex);
3411
3412 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
3413 hr_qp = free_mr->rsv_qp[i];
3414
3415 ret = free_mr_post_send_lp_wqe(hr_qp);
3416 if (ret) {
3417 ibdev_err(ibdev,
3418 "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
3419 hr_qp->qpn, ret);
3420 break;
3421 }
3422
3423 cqe_cnt++;
3424 }
3425
3426 end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
3427 while (cqe_cnt) {
3428 npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc);
3429 if (npolled < 0) {
3430 ibdev_err(ibdev,
3431 "failed to poll cqe for free mr, remain %d cqe.\n",
3432 cqe_cnt);
3433 goto out;
3434 }
3435
3436 if (time_after(jiffies, end)) {
3437 ibdev_err(ibdev,
3438 "timed out while polling cqe for free mr, remain %d cqe.\n",
3439 cqe_cnt);
3440 goto out;
3441 }
3442 cqe_cnt -= npolled;
3443 }
3444
3445 out:
3446 mutex_unlock(&free_mr->mutex);
3447 }
3448
3449 static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
3450 {
3451 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
3452 free_mr_send_cmd_to_hw(hr_dev);
3453 }
3454
3455 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3456 {
3457 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
3458 }
3459
3460 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3461 {
3462 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3463
3464 /* Return the cqe only when its Owner bit is the inverse of the MSB of cons_idx */
3465 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ?
cqe : 3466 NULL; 3467 } 3468 3469 static inline void update_cq_db(struct hns_roce_dev *hr_dev, 3470 struct hns_roce_cq *hr_cq) 3471 { 3472 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) { 3473 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M; 3474 } else { 3475 struct hns_roce_v2_db cq_db = {}; 3476 3477 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn); 3478 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB); 3479 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index); 3480 hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1); 3481 3482 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg); 3483 } 3484 } 3485 3486 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, 3487 struct hns_roce_srq *srq) 3488 { 3489 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); 3490 struct hns_roce_v2_cqe *cqe, *dest; 3491 u32 prod_index; 3492 int nfreed = 0; 3493 int wqe_index; 3494 u8 owner_bit; 3495 3496 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index); 3497 ++prod_index) { 3498 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe) 3499 break; 3500 } 3501 3502 /* 3503 * Now backwards through the CQ, removing CQ entries 3504 * that match our QP by overwriting them with next entries. 3505 */ 3506 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) { 3507 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe); 3508 if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) { 3509 if (srq && hr_reg_read(cqe, CQE_S_R)) { 3510 wqe_index = hr_reg_read(cqe, CQE_WQE_IDX); 3511 hns_roce_free_srq_wqe(srq, wqe_index); 3512 } 3513 ++nfreed; 3514 } else if (nfreed) { 3515 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) & 3516 hr_cq->ib_cq.cqe); 3517 owner_bit = hr_reg_read(dest, CQE_OWNER); 3518 memcpy(dest, cqe, hr_cq->cqe_size); 3519 hr_reg_write(dest, CQE_OWNER, owner_bit); 3520 } 3521 } 3522 3523 if (nfreed) { 3524 hr_cq->cons_index += nfreed; 3525 update_cq_db(hr_dev, hr_cq); 3526 } 3527 } 3528 3529 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, 3530 struct hns_roce_srq *srq) 3531 { 3532 spin_lock_irq(&hr_cq->lock); 3533 __hns_roce_v2_cq_clean(hr_cq, qpn, srq); 3534 spin_unlock_irq(&hr_cq->lock); 3535 } 3536 3537 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, 3538 struct hns_roce_cq *hr_cq, void *mb_buf, 3539 u64 *mtts, dma_addr_t dma_handle) 3540 { 3541 struct hns_roce_v2_cq_context *cq_context; 3542 3543 cq_context = mb_buf; 3544 memset(cq_context, 0, sizeof(*cq_context)); 3545 3546 hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID); 3547 hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED); 3548 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth)); 3549 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector); 3550 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn); 3551 3552 if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE) 3553 hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B); 3554 3555 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH) 3556 hr_reg_enable(cq_context, CQC_STASH); 3557 3558 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L, 3559 to_hr_hw_page_addr(mtts[0])); 3560 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H, 3561 upper_32_bits(to_hr_hw_page_addr(mtts[0]))); 3562 hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num == 3563 HNS_ROCE_HOP_NUM_0 ? 
0 : hr_dev->caps.cqe_hop_num);
3564 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
3565 to_hr_hw_page_addr(mtts[1]));
3566 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
3567 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3568 hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
3569 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3570 hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
3571 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3572 hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
3573 hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
3574 hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
3575 hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
3576 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
3577 ((u32)hr_cq->db.dma) >> 1);
3578 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
3579 hr_cq->db.dma >> 32);
3580 hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
3581 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3582 hr_reg_write(cq_context, CQC_CQ_PERIOD,
3583 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3584 }
3585
3586 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3587 enum ib_cq_notify_flags flags)
3588 {
3589 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3590 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3591 struct hns_roce_v2_db cq_db = {};
3592 u32 notify_flag;
3593
3594 /*
3595 * flags = 0, then notify_flag : next
3596 * flags = 1, then notify_flag : solicited
3597 */
3598 notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3599 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3600
3601 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3602 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
3603 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3604 hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
3605 hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
3606
3607 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3608
3609 return 0;
3610 }
3611
3612 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3613 int num_entries, struct ib_wc *wc)
3614 {
3615 unsigned int left;
3616 int npolled = 0;
3617
3618 left = wq->head - wq->tail;
3619 if (left == 0)
3620 return 0;
3621
3622 left = min_t(unsigned int, (unsigned int)num_entries, left);
3623 while (npolled < left) {
3624 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3625 wc->status = IB_WC_WR_FLUSH_ERR;
3626 wc->vendor_err = 0;
3627 wc->qp = &hr_qp->ibqp;
3628
3629 wq->tail++;
3630 wc++;
3631 npolled++;
3632 }
3633
3634 return npolled;
3635 }
3636
3637 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3638 struct ib_wc *wc)
3639 {
3640 struct hns_roce_qp *hr_qp;
3641 int npolled = 0;
3642
3643 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3644 npolled += sw_comp(hr_qp, &hr_qp->sq,
3645 num_entries - npolled, wc + npolled);
3646 if (npolled >= num_entries)
3647 goto out;
3648 }
3649
3650 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3651 npolled += sw_comp(hr_qp, &hr_qp->rq,
3652 num_entries - npolled, wc + npolled);
3653 if (npolled >= num_entries)
3654 goto out;
3655 }
3656
3657 out:
3658 return npolled;
3659 }
3660
3661 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3662 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3663 struct ib_wc *wc)
3664 {
3665 static const struct {
3666 u32 cqe_status;
3667 enum ib_wc_status wc_status;
3668 } map[] = {
3669 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3670 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3671 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR,
IB_WC_LOC_QP_OP_ERR },
3672 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3673 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3674 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3675 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3676 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3677 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3678 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3679 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3680 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3681 IB_WC_RETRY_EXC_ERR },
3682 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3683 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3684 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3685 };
3686
3687 u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
3688 int i;
3689
3690 wc->status = IB_WC_GENERAL_ERR;
3691 for (i = 0; i < ARRAY_SIZE(map); i++)
3692 if (cqe_status == map[i].cqe_status) {
3693 wc->status = map[i].wc_status;
3694 break;
3695 }
3696
3697 if (likely(wc->status == IB_WC_SUCCESS ||
3698 wc->status == IB_WC_WR_FLUSH_ERR))
3699 return;
3700
3701 ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3702 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3703 cq->cqe_size, false);
3704 wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
3705
3706 /*
3707 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
3708 * the standard protocol. The driver must ignore it and need not set
3709 * the QP to an error state.
3710 */
3711 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3712 return;
3713
3714 flush_cqe(hr_dev, qp);
3715 }
3716
3717 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
3718 struct hns_roce_qp **cur_qp)
3719 {
3720 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3721 struct hns_roce_qp *hr_qp = *cur_qp;
3722 u32 qpn;
3723
3724 qpn = hr_reg_read(cqe, CQE_LCL_QPN);
3725
3726 if (!hr_qp || qpn != hr_qp->qpn) {
3727 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3728 if (unlikely(!hr_qp)) {
3729 ibdev_err(&hr_dev->ib_dev,
3730 "CQ %06lx with entry for unknown QPN %06x\n",
3731 hr_cq->cqn, qpn);
3732 return -EINVAL;
3733 }
3734 *cur_qp = hr_qp;
3735 }
3736
3737 return 0;
3738 }
3739
3740 /*
3741 * mapped-value = 1 + real-value
3742 * The ib wc opcode's real value starts from 0. In order to distinguish
3743 * between initialized and uninitialized map values, we add 1 to the actual
3744 * value when defining the mapping, so that validity can be identified by
3745 * checking whether the mapped value is greater than 0.
3746 */
3747 #define HR_WC_OP_MAP(hr_key, ib_key) \
3748 [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
3749
3750 static const u32 wc_send_op_map[] = {
3751 HR_WC_OP_MAP(SEND, SEND),
3752 HR_WC_OP_MAP(SEND_WITH_INV, SEND),
3753 HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
3754 HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
3755 HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
3756 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
3757 HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
3758 HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
3759 HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
3760 HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
3761 HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
3762 HR_WC_OP_MAP(BIND_MW, REG_MR),
3763 };
3764
3765 static int to_ib_wc_send_op(u32 hr_opcode)
3766 {
3767 if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
3768 return -EINVAL;
3769
3770 return wc_send_op_map[hr_opcode] ?
wc_send_op_map[hr_opcode] - 1 : 3771 -EINVAL; 3772 } 3773 3774 static const u32 wc_recv_op_map[] = { 3775 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM), 3776 HR_WC_OP_MAP(SEND, RECV), 3777 HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM), 3778 HR_WC_OP_MAP(SEND_WITH_INV, RECV), 3779 }; 3780 3781 static int to_ib_wc_recv_op(u32 hr_opcode) 3782 { 3783 if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map)) 3784 return -EINVAL; 3785 3786 return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 : 3787 -EINVAL; 3788 } 3789 3790 static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) 3791 { 3792 u32 hr_opcode; 3793 int ib_opcode; 3794 3795 wc->wc_flags = 0; 3796 3797 hr_opcode = hr_reg_read(cqe, CQE_OPCODE); 3798 switch (hr_opcode) { 3799 case HNS_ROCE_V2_WQE_OP_RDMA_READ: 3800 wc->byte_len = le32_to_cpu(cqe->byte_cnt); 3801 break; 3802 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM: 3803 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM: 3804 wc->wc_flags |= IB_WC_WITH_IMM; 3805 break; 3806 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP: 3807 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD: 3808 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP: 3809 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD: 3810 wc->byte_len = 8; 3811 break; 3812 default: 3813 break; 3814 } 3815 3816 ib_opcode = to_ib_wc_send_op(hr_opcode); 3817 if (ib_opcode < 0) 3818 wc->status = IB_WC_GENERAL_ERR; 3819 else 3820 wc->opcode = ib_opcode; 3821 } 3822 3823 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) 3824 { 3825 u32 hr_opcode; 3826 int ib_opcode; 3827 3828 wc->byte_len = le32_to_cpu(cqe->byte_cnt); 3829 3830 hr_opcode = hr_reg_read(cqe, CQE_OPCODE); 3831 switch (hr_opcode) { 3832 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM: 3833 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM: 3834 wc->wc_flags = IB_WC_WITH_IMM; 3835 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata)); 3836 break; 3837 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV: 3838 wc->wc_flags = IB_WC_WITH_INVALIDATE; 3839 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey); 3840 break; 3841 default: 3842 wc->wc_flags = 0; 3843 } 3844 3845 ib_opcode = to_ib_wc_recv_op(hr_opcode); 3846 if (ib_opcode < 0) 3847 wc->status = IB_WC_GENERAL_ERR; 3848 else 3849 wc->opcode = ib_opcode; 3850 3851 wc->sl = hr_reg_read(cqe, CQE_SL); 3852 wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN); 3853 wc->slid = 0; 3854 wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? 
IB_WC_GRH : 0;
3855 	wc->port_num = hr_reg_read(cqe, CQE_PORTN);
3856 	wc->pkey_index = 0;
3857 
3858 	if (hr_reg_read(cqe, CQE_VID_VLD)) {
3859 		wc->vlan_id = hr_reg_read(cqe, CQE_VID);
3860 		wc->wc_flags |= IB_WC_WITH_VLAN;
3861 	} else {
3862 		wc->vlan_id = 0xffff;
3863 	}
3864 
3865 	wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
3866 
3867 	return 0;
3868 }
3869 
3870 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3871 				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3872 {
3873 	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3874 	struct hns_roce_qp *qp = *cur_qp;
3875 	struct hns_roce_srq *srq = NULL;
3876 	struct hns_roce_v2_cqe *cqe;
3877 	struct hns_roce_wq *wq;
3878 	int is_send;
3879 	u16 wqe_idx;
3880 	int ret;
3881 
3882 	cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3883 	if (!cqe)
3884 		return -EAGAIN;
3885 
3886 	++hr_cq->cons_index;
3887 	/* Memory barrier: read the CQE contents only after the ownership check */
3888 	rmb();
3889 
3890 	ret = get_cur_qp(hr_cq, cqe, &qp);
3891 	if (ret)
3892 		return ret;
3893 
3894 	wc->qp = &qp->ibqp;
3895 	wc->vendor_err = 0;
3896 
3897 	wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
3898 
3899 	is_send = !hr_reg_read(cqe, CQE_S_R);
3900 	if (is_send) {
3901 		wq = &qp->sq;
3902 
3903 		/* If sq_signal_bits is set, the tail pointer is first advanced
3904 		 * to the WQE corresponding to the current CQE.
3905 		 */
3906 		if (qp->sq_signal_bits)
3907 			wq->tail += (wqe_idx - (u16)wq->tail) &
3908 				    (wq->wqe_cnt - 1);
3909 
3910 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3911 		++wq->tail;
3912 
3913 		fill_send_wc(wc, cqe);
3914 	} else {
3915 		if (qp->ibqp.srq) {
3916 			srq = to_hr_srq(qp->ibqp.srq);
3917 			wc->wr_id = srq->wrid[wqe_idx];
3918 			hns_roce_free_srq_wqe(srq, wqe_idx);
3919 		} else {
3920 			wq = &qp->rq;
3921 			wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3922 			++wq->tail;
3923 		}
3924 
3925 		ret = fill_recv_wc(wc, cqe);
3926 	}
3927 
3928 	get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
3929 	if (unlikely(wc->status != IB_WC_SUCCESS))
3930 		return 0;
3931 
3932 	return ret;
3933 }
3934 
3935 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3936 			       struct ib_wc *wc)
3937 {
3938 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3939 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3940 	struct hns_roce_qp *cur_qp = NULL;
3941 	unsigned long flags;
3942 	int npolled;
3943 
3944 	spin_lock_irqsave(&hr_cq->lock, flags);
3945 
3946 	/*
3947 	 * When the device starts to reset, the state is RST_DOWN. At this
3948 	 * time there may still be valid CQEs in the hardware that have not
3949 	 * been polled, so it is not allowed to switch to software polling
3950 	 * immediately. Once the state changes to UNINIT, no CQEs remain in
3951 	 * the hardware, and the driver then switches to software mode.
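	 *
	 * A sketch of the assumed degradation sequence (all names are taken
	 * from this file, not from a hardware spec):
	 *
	 *   RST_DOWN -> keep polling hardware CQEs via hns_roce_v2_poll_one()
	 *   UNINIT   -> the hardware queues are gone; fabricate flush-error
	 *               completions (IB_WC_WR_FLUSH_ERR) from the software
	 *               wrid rings via hns_roce_v2_sw_poll_cq()/sw_comp()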
3952 */ 3953 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) { 3954 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc); 3955 goto out; 3956 } 3957 3958 for (npolled = 0; npolled < num_entries; ++npolled) { 3959 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled)) 3960 break; 3961 } 3962 3963 if (npolled) 3964 update_cq_db(hr_dev, hr_cq); 3965 3966 out: 3967 spin_unlock_irqrestore(&hr_cq->lock, flags); 3968 3969 return npolled; 3970 } 3971 3972 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, 3973 u32 step_idx, u8 *mbox_cmd) 3974 { 3975 u8 cmd; 3976 3977 switch (type) { 3978 case HEM_TYPE_QPC: 3979 cmd = HNS_ROCE_CMD_WRITE_QPC_BT0; 3980 break; 3981 case HEM_TYPE_MTPT: 3982 cmd = HNS_ROCE_CMD_WRITE_MPT_BT0; 3983 break; 3984 case HEM_TYPE_CQC: 3985 cmd = HNS_ROCE_CMD_WRITE_CQC_BT0; 3986 break; 3987 case HEM_TYPE_SRQC: 3988 cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0; 3989 break; 3990 case HEM_TYPE_SCCC: 3991 cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0; 3992 break; 3993 case HEM_TYPE_QPC_TIMER: 3994 cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; 3995 break; 3996 case HEM_TYPE_CQC_TIMER: 3997 cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; 3998 break; 3999 default: 4000 dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type); 4001 return -EINVAL; 4002 } 4003 4004 *mbox_cmd = cmd + step_idx; 4005 4006 return 0; 4007 } 4008 4009 static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, 4010 dma_addr_t base_addr) 4011 { 4012 struct hns_roce_cmq_desc desc; 4013 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data; 4014 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz); 4015 u64 addr = to_hr_hw_page_addr(base_addr); 4016 4017 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false); 4018 4019 hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr)); 4020 hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr)); 4021 hr_reg_write(req, CFG_GMV_BT_IDX, idx); 4022 4023 return hns_roce_cmq_send(hr_dev, &desc, 1); 4024 } 4025 4026 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, 4027 dma_addr_t base_addr, u32 hem_type, u32 step_idx) 4028 { 4029 int ret; 4030 u8 cmd; 4031 4032 if (unlikely(hem_type == HEM_TYPE_GMV)) 4033 return config_gmv_ba_to_hw(hr_dev, obj, base_addr); 4034 4035 if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx)) 4036 return 0; 4037 4038 ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd); 4039 if (ret < 0) 4040 return ret; 4041 4042 return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj); 4043 } 4044 4045 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, 4046 struct hns_roce_hem_table *table, int obj, 4047 u32 step_idx) 4048 { 4049 struct hns_roce_hem_iter iter; 4050 struct hns_roce_hem_mhop mhop; 4051 struct hns_roce_hem *hem; 4052 unsigned long mhop_obj = obj; 4053 int i, j, k; 4054 int ret = 0; 4055 u64 hem_idx = 0; 4056 u64 l1_idx = 0; 4057 u64 bt_ba = 0; 4058 u32 chunk_ba_num; 4059 u32 hop_num; 4060 4061 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) 4062 return 0; 4063 4064 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); 4065 i = mhop.l0_idx; 4066 j = mhop.l1_idx; 4067 k = mhop.l2_idx; 4068 hop_num = mhop.hop_num; 4069 chunk_ba_num = mhop.bt_chunk_size / 8; 4070 4071 if (hop_num == 2) { 4072 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num + 4073 k; 4074 l1_idx = i * chunk_ba_num + j; 4075 } else if (hop_num == 1) { 4076 hem_idx = i * chunk_ba_num + j; 4077 } else if (hop_num == HNS_ROCE_HOP_NUM_0) { 4078 hem_idx = i; 4079 } 4080 4081 if (table->type == 
HEM_TYPE_SCCC) 4082 obj = mhop.l0_idx; 4083 4084 if (check_whether_last_step(hop_num, step_idx)) { 4085 hem = table->hem[hem_idx]; 4086 for (hns_roce_hem_first(hem, &iter); 4087 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { 4088 bt_ba = hns_roce_hem_addr(&iter); 4089 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, 4090 step_idx); 4091 } 4092 } else { 4093 if (step_idx == 0) 4094 bt_ba = table->bt_l0_dma_addr[i]; 4095 else if (step_idx == 1 && hop_num == 2) 4096 bt_ba = table->bt_l1_dma_addr[l1_idx]; 4097 4098 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx); 4099 } 4100 4101 return ret; 4102 } 4103 4104 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, 4105 struct hns_roce_hem_table *table, 4106 int tag, u32 step_idx) 4107 { 4108 struct hns_roce_cmd_mailbox *mailbox; 4109 struct device *dev = hr_dev->dev; 4110 u8 cmd = 0xff; 4111 int ret; 4112 4113 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) 4114 return 0; 4115 4116 switch (table->type) { 4117 case HEM_TYPE_QPC: 4118 cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0; 4119 break; 4120 case HEM_TYPE_MTPT: 4121 cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0; 4122 break; 4123 case HEM_TYPE_CQC: 4124 cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0; 4125 break; 4126 case HEM_TYPE_SRQC: 4127 cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0; 4128 break; 4129 case HEM_TYPE_SCCC: 4130 case HEM_TYPE_QPC_TIMER: 4131 case HEM_TYPE_CQC_TIMER: 4132 case HEM_TYPE_GMV: 4133 return 0; 4134 default: 4135 dev_warn(dev, "table %u not to be destroyed by mailbox!\n", 4136 table->type); 4137 return 0; 4138 } 4139 4140 cmd += step_idx; 4141 4142 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 4143 if (IS_ERR(mailbox)) 4144 return PTR_ERR(mailbox); 4145 4146 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag); 4147 4148 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 4149 return ret; 4150 } 4151 4152 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, 4153 struct hns_roce_v2_qp_context *context, 4154 struct hns_roce_v2_qp_context *qpc_mask, 4155 struct hns_roce_qp *hr_qp) 4156 { 4157 struct hns_roce_cmd_mailbox *mailbox; 4158 int qpc_size; 4159 int ret; 4160 4161 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 4162 if (IS_ERR(mailbox)) 4163 return PTR_ERR(mailbox); 4164 4165 /* The qpc size of HIP08 is only 256B, which is half of HIP09 */ 4166 qpc_size = hr_dev->caps.qpc_sz; 4167 memcpy(mailbox->buf, context, qpc_size); 4168 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size); 4169 4170 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, 4171 HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn); 4172 4173 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 4174 4175 return ret; 4176 } 4177 4178 static void set_access_flags(struct hns_roce_qp *hr_qp, 4179 struct hns_roce_v2_qp_context *context, 4180 struct hns_roce_v2_qp_context *qpc_mask, 4181 const struct ib_qp_attr *attr, int attr_mask) 4182 { 4183 u8 dest_rd_atomic; 4184 u32 access_flags; 4185 4186 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ? 4187 attr->max_dest_rd_atomic : hr_qp->resp_depth; 4188 4189 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ? 
4190 attr->qp_access_flags : hr_qp->atomic_rd_en; 4191 4192 if (!dest_rd_atomic) 4193 access_flags &= IB_ACCESS_REMOTE_WRITE; 4194 4195 hr_reg_write_bool(context, QPC_RRE, 4196 access_flags & IB_ACCESS_REMOTE_READ); 4197 hr_reg_clear(qpc_mask, QPC_RRE); 4198 4199 hr_reg_write_bool(context, QPC_RWE, 4200 access_flags & IB_ACCESS_REMOTE_WRITE); 4201 hr_reg_clear(qpc_mask, QPC_RWE); 4202 4203 hr_reg_write_bool(context, QPC_ATE, 4204 access_flags & IB_ACCESS_REMOTE_ATOMIC); 4205 hr_reg_clear(qpc_mask, QPC_ATE); 4206 hr_reg_write_bool(context, QPC_EXT_ATE, 4207 access_flags & IB_ACCESS_REMOTE_ATOMIC); 4208 hr_reg_clear(qpc_mask, QPC_EXT_ATE); 4209 } 4210 4211 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, 4212 struct hns_roce_v2_qp_context *context, 4213 struct hns_roce_v2_qp_context *qpc_mask) 4214 { 4215 hr_reg_write(context, QPC_SGE_SHIFT, 4216 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt, 4217 hr_qp->sge.sge_shift)); 4218 4219 hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt)); 4220 4221 hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt)); 4222 } 4223 4224 static inline int get_cqn(struct ib_cq *ib_cq) 4225 { 4226 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0; 4227 } 4228 4229 static inline int get_pdn(struct ib_pd *ib_pd) 4230 { 4231 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0; 4232 } 4233 4234 static void modify_qp_reset_to_init(struct ib_qp *ibqp, 4235 const struct ib_qp_attr *attr, 4236 struct hns_roce_v2_qp_context *context, 4237 struct hns_roce_v2_qp_context *qpc_mask) 4238 { 4239 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4240 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4241 4242 /* 4243 * In v2 engine, software pass context and context mask to hardware 4244 * when modifying qp. If software need modify some fields in context, 4245 * we should set all bits of the relevant fields in context mask to 4246 * 0 at the same time, else set them to 0x1. 
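	 *
	 * A minimal sketch of that convention as used throughout this file
	 * (the field chosen here is purely illustrative):
	 *
	 *   hr_reg_write(context, QPC_PD, pdn);  <- desired value
	 *   hr_reg_clear(qpc_mask, QPC_PD);      <- mask 0: HW applies field
	 *
	 * For the RESET-to-INIT transition the caller has already zeroed the
	 * whole mask (see hns_roce_v2_set_abs_fields()), so this helper only
	 * fills in the context half.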
4247 */ 4248 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type)); 4249 4250 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd)); 4251 4252 hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs)); 4253 4254 set_qpc_wqe_cnt(hr_qp, context, qpc_mask); 4255 4256 /* No VLAN need to set 0xFFF */ 4257 hr_reg_write(context, QPC_VLAN_ID, 0xfff); 4258 4259 if (ibqp->qp_type == IB_QPT_XRC_TGT) { 4260 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn); 4261 4262 hr_reg_enable(context, QPC_XRC_QP_TYPE); 4263 } 4264 4265 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 4266 hr_reg_enable(context, QPC_RQ_RECORD_EN); 4267 4268 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB) 4269 hr_reg_enable(context, QPC_OWNER_MODE); 4270 4271 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L, 4272 lower_32_bits(hr_qp->rdb.dma) >> 1); 4273 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H, 4274 upper_32_bits(hr_qp->rdb.dma)); 4275 4276 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq)); 4277 4278 if (ibqp->srq) { 4279 hr_reg_enable(context, QPC_SRQ_EN); 4280 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn); 4281 } 4282 4283 hr_reg_enable(context, QPC_FRE); 4284 4285 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); 4286 4287 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ) 4288 return; 4289 4290 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH) 4291 hr_reg_enable(&context->ext, QPCEX_STASH); 4292 } 4293 4294 static void modify_qp_init_to_init(struct ib_qp *ibqp, 4295 const struct ib_qp_attr *attr, 4296 struct hns_roce_v2_qp_context *context, 4297 struct hns_roce_v2_qp_context *qpc_mask) 4298 { 4299 /* 4300 * In v2 engine, software pass context and context mask to hardware 4301 * when modifying qp. If software need modify some fields in context, 4302 * we should set all bits of the relevant fields in context mask to 4303 * 0 at the same time, else set them to 0x1. 4304 */ 4305 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type)); 4306 hr_reg_clear(qpc_mask, QPC_TST); 4307 4308 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd)); 4309 hr_reg_clear(qpc_mask, QPC_PD); 4310 4311 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq)); 4312 hr_reg_clear(qpc_mask, QPC_RX_CQN); 4313 4314 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); 4315 hr_reg_clear(qpc_mask, QPC_TX_CQN); 4316 4317 if (ibqp->srq) { 4318 hr_reg_enable(context, QPC_SRQ_EN); 4319 hr_reg_clear(qpc_mask, QPC_SRQ_EN); 4320 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn); 4321 hr_reg_clear(qpc_mask, QPC_SRQN); 4322 } 4323 } 4324 4325 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, 4326 struct hns_roce_qp *hr_qp, 4327 struct hns_roce_v2_qp_context *context, 4328 struct hns_roce_v2_qp_context *qpc_mask) 4329 { 4330 u64 mtts[MTT_MIN_COUNT] = { 0 }; 4331 u64 wqe_sge_ba; 4332 int count; 4333 4334 /* Search qp buf's mtts */ 4335 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, 4336 MTT_MIN_COUNT, &wqe_sge_ba); 4337 if (hr_qp->rq.wqe_cnt && count < 1) { 4338 ibdev_err(&hr_dev->ib_dev, 4339 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn); 4340 return -EINVAL; 4341 } 4342 4343 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); 4344 qpc_mask->wqe_sge_ba = 0; 4345 4346 /* 4347 * In v2 engine, software pass context and context mask to hardware 4348 * when modifying qp. If software need modify some fields in context, 4349 * we should set all bits of the relevant fields in context mask to 4350 * 0 at the same time, else set them to 0x1. 
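	 *
	 * Note on the address encoding below (an observation from this code,
	 * not a register spec): the base address appears to be programmed in
	 * 8-byte units, hence the ">> 3" applied to wqe_sge_ba above and the
	 * ">> (32 + 3)" used to extract the high part right after this
	 * comment.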
4351 */ 4352 hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3)); 4353 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H); 4354 4355 hr_reg_write(context, QPC_SQ_HOP_NUM, 4356 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num, 4357 hr_qp->sq.wqe_cnt)); 4358 hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM); 4359 4360 hr_reg_write(context, QPC_SGE_HOP_NUM, 4361 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num, 4362 hr_qp->sge.sge_cnt)); 4363 hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM); 4364 4365 hr_reg_write(context, QPC_RQ_HOP_NUM, 4366 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num, 4367 hr_qp->rq.wqe_cnt)); 4368 4369 hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM); 4370 4371 hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ, 4372 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift)); 4373 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ); 4374 4375 hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ, 4376 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift)); 4377 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ); 4378 4379 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0])); 4380 qpc_mask->rq_cur_blk_addr = 0; 4381 4382 hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H, 4383 upper_32_bits(to_hr_hw_page_addr(mtts[0]))); 4384 hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H); 4385 4386 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1])); 4387 qpc_mask->rq_nxt_blk_addr = 0; 4388 4389 hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, 4390 upper_32_bits(to_hr_hw_page_addr(mtts[1]))); 4391 hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); 4392 4393 return 0; 4394 } 4395 4396 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, 4397 struct hns_roce_qp *hr_qp, 4398 struct hns_roce_v2_qp_context *context, 4399 struct hns_roce_v2_qp_context *qpc_mask) 4400 { 4401 struct ib_device *ibdev = &hr_dev->ib_dev; 4402 u64 sge_cur_blk = 0; 4403 u64 sq_cur_blk = 0; 4404 int count; 4405 4406 /* search qp buf's mtts */ 4407 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); 4408 if (count < 1) { 4409 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n", 4410 hr_qp->qpn); 4411 return -EINVAL; 4412 } 4413 if (hr_qp->sge.sge_cnt > 0) { 4414 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 4415 hr_qp->sge.offset, 4416 &sge_cur_blk, 1, NULL); 4417 if (count < 1) { 4418 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n", 4419 hr_qp->qpn); 4420 return -EINVAL; 4421 } 4422 } 4423 4424 /* 4425 * In v2 engine, software pass context and context mask to hardware 4426 * when modifying qp. If software need modify some fields in context, 4427 * we should set all bits of the relevant fields in context mask to 4428 * 0 at the same time, else set them to 0x1. 
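	 *
	 * The SQ and the extended-SGE region live in the same MTR; the
	 * lookups above fetch only the first page of each (sq_cur_blk and
	 * sge_cur_blk), which seeds the "current block" fields written
	 * below. This reading is inferred from the code, not from
	 * documentation.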
4429 */ 4430 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L, 4431 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4432 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H, 4433 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4434 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L); 4435 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H); 4436 4437 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L, 4438 lower_32_bits(to_hr_hw_page_addr(sge_cur_blk))); 4439 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H, 4440 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk))); 4441 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L); 4442 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H); 4443 4444 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L, 4445 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4446 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H, 4447 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4448 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L); 4449 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H); 4450 4451 return 0; 4452 } 4453 4454 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp, 4455 const struct ib_qp_attr *attr) 4456 { 4457 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) 4458 return IB_MTU_4096; 4459 4460 return attr->path_mtu; 4461 } 4462 4463 static int modify_qp_init_to_rtr(struct ib_qp *ibqp, 4464 const struct ib_qp_attr *attr, int attr_mask, 4465 struct hns_roce_v2_qp_context *context, 4466 struct hns_roce_v2_qp_context *qpc_mask, 4467 struct ib_udata *udata) 4468 { 4469 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, 4470 struct hns_roce_ucontext, ibucontext); 4471 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4472 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4473 struct ib_device *ibdev = &hr_dev->ib_dev; 4474 dma_addr_t trrl_ba; 4475 dma_addr_t irrl_ba; 4476 enum ib_mtu ib_mtu; 4477 const u8 *smac; 4478 u8 lp_pktn_ini; 4479 u64 *mtts; 4480 u8 *dmac; 4481 u32 port; 4482 int mtu; 4483 int ret; 4484 4485 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask); 4486 if (ret) { 4487 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret); 4488 return ret; 4489 } 4490 4491 /* Search IRRL's mtts */ 4492 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, 4493 hr_qp->qpn, &irrl_ba); 4494 if (!mtts) { 4495 ibdev_err(ibdev, "failed to find qp irrl_table.\n"); 4496 return -EINVAL; 4497 } 4498 4499 /* Search TRRL's mtts */ 4500 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, 4501 hr_qp->qpn, &trrl_ba); 4502 if (!mtts) { 4503 ibdev_err(ibdev, "failed to find qp trrl_table.\n"); 4504 return -EINVAL; 4505 } 4506 4507 if (attr_mask & IB_QP_ALT_PATH) { 4508 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n", 4509 attr_mask); 4510 return -EINVAL; 4511 } 4512 4513 hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4); 4514 hr_reg_clear(qpc_mask, QPC_TRRL_BA_L); 4515 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4)); 4516 qpc_mask->trrl_ba = 0; 4517 hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4)); 4518 hr_reg_clear(qpc_mask, QPC_TRRL_BA_H); 4519 4520 context->irrl_ba = cpu_to_le32(irrl_ba >> 6); 4521 qpc_mask->irrl_ba = 0; 4522 hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6)); 4523 hr_reg_clear(qpc_mask, QPC_IRRL_BA_H); 4524 4525 hr_reg_enable(context, QPC_RMT_E2E); 4526 hr_reg_clear(qpc_mask, QPC_RMT_E2E); 4527 4528 hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits); 4529 hr_reg_clear(qpc_mask, QPC_SIG_TYPE); 4530 4531 port = (attr_mask & IB_QP_PORT) ? 
(attr->port_num - 1) : hr_qp->port; 4532 4533 smac = (const u8 *)hr_dev->dev_addr[port]; 4534 dmac = (u8 *)attr->ah_attr.roce.dmac; 4535 /* when dmac equals smac or loop_idc is 1, it should loopback */ 4536 if (ether_addr_equal_unaligned(dmac, smac) || 4537 hr_dev->loop_idc == 0x1) { 4538 hr_reg_write(context, QPC_LBI, hr_dev->loop_idc); 4539 hr_reg_clear(qpc_mask, QPC_LBI); 4540 } 4541 4542 if (attr_mask & IB_QP_DEST_QPN) { 4543 hr_reg_write(context, QPC_DQPN, attr->dest_qp_num); 4544 hr_reg_clear(qpc_mask, QPC_DQPN); 4545 } 4546 4547 memcpy(&context->dmac, dmac, sizeof(u32)); 4548 hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4]))); 4549 qpc_mask->dmac = 0; 4550 hr_reg_clear(qpc_mask, QPC_DMAC_H); 4551 4552 ib_mtu = get_mtu(ibqp, attr); 4553 hr_qp->path_mtu = ib_mtu; 4554 4555 mtu = ib_mtu_enum_to_int(ib_mtu); 4556 if (WARN_ON(mtu <= 0)) 4557 return -EINVAL; 4558 #define MIN_LP_MSG_LEN 1024 4559 /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */ 4560 lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu); 4561 4562 if (attr_mask & IB_QP_PATH_MTU) { 4563 hr_reg_write(context, QPC_MTU, ib_mtu); 4564 hr_reg_clear(qpc_mask, QPC_MTU); 4565 } 4566 4567 hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini); 4568 hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI); 4569 4570 /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */ 4571 hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini); 4572 hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ); 4573 4574 hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR); 4575 hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN); 4576 hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE); 4577 4578 context->rq_rnr_timer = 0; 4579 qpc_mask->rq_rnr_timer = 0; 4580 4581 hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX); 4582 hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX); 4583 4584 /* rocee send 2^lp_sgen_ini segs every time */ 4585 hr_reg_write(context, QPC_LP_SGEN_INI, 3); 4586 hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI); 4587 4588 if (udata && ibqp->qp_type == IB_QPT_RC && 4589 (uctx->config & HNS_ROCE_RQ_INLINE_FLAGS)) { 4590 hr_reg_write_bool(context, QPC_RQIE, 4591 hr_dev->caps.flags & 4592 HNS_ROCE_CAP_FLAG_RQ_INLINE); 4593 hr_reg_clear(qpc_mask, QPC_RQIE); 4594 } 4595 4596 if (udata && 4597 (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_XRC_TGT) && 4598 (uctx->config & HNS_ROCE_CQE_INLINE_FLAGS)) { 4599 hr_reg_write_bool(context, QPC_CQEIE, 4600 hr_dev->caps.flags & 4601 HNS_ROCE_CAP_FLAG_CQE_INLINE); 4602 hr_reg_clear(qpc_mask, QPC_CQEIE); 4603 4604 hr_reg_write(context, QPC_CQEIS, 0); 4605 hr_reg_clear(qpc_mask, QPC_CQEIS); 4606 } 4607 4608 return 0; 4609 } 4610 4611 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, 4612 const struct ib_qp_attr *attr, int attr_mask, 4613 struct hns_roce_v2_qp_context *context, 4614 struct hns_roce_v2_qp_context *qpc_mask) 4615 { 4616 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4617 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4618 struct ib_device *ibdev = &hr_dev->ib_dev; 4619 int ret; 4620 4621 /* Not support alternate path and path migration */ 4622 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) { 4623 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); 4624 return -EINVAL; 4625 } 4626 4627 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask); 4628 if (ret) { 4629 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret); 4630 return ret; 4631 } 4632 4633 /* 4634 * Set some fields in context to zero, Because the default values 4635 * of all fields in context are zero, we need not set them to 0 again. 
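	 * (the whole context was memset() to zero in hns_roce_v2_modify_qp()),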
4636 * but we should set the relevant fields of context mask to 0. 4637 */ 4638 hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX); 4639 4640 hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN); 4641 4642 hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE); 4643 hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD); 4644 hr_reg_clear(qpc_mask, QPC_IRRL_PSN); 4645 4646 hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL); 4647 4648 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN); 4649 4650 hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG); 4651 4652 hr_reg_clear(qpc_mask, QPC_CHECK_FLG); 4653 4654 hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD); 4655 4656 return 0; 4657 } 4658 4659 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 4660 u32 *dip_idx) 4661 { 4662 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 4663 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4664 u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx; 4665 u32 *head = &hr_dev->qp_table.idx_table.head; 4666 u32 *tail = &hr_dev->qp_table.idx_table.tail; 4667 struct hns_roce_dip *hr_dip; 4668 unsigned long flags; 4669 int ret = 0; 4670 4671 spin_lock_irqsave(&hr_dev->dip_list_lock, flags); 4672 4673 spare_idx[*tail] = ibqp->qp_num; 4674 *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1); 4675 4676 list_for_each_entry(hr_dip, &hr_dev->dip_list, node) { 4677 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) { 4678 *dip_idx = hr_dip->dip_idx; 4679 goto out; 4680 } 4681 } 4682 4683 /* If no dgid is found, a new dip and a mapping between dgid and 4684 * dip_idx will be created. 4685 */ 4686 hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC); 4687 if (!hr_dip) { 4688 ret = -ENOMEM; 4689 goto out; 4690 } 4691 4692 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); 4693 hr_dip->dip_idx = *dip_idx = spare_idx[*head]; 4694 *head = (*head == hr_dev->caps.num_qps - 1) ? 
0 : (*head + 1); 4695 list_add_tail(&hr_dip->node, &hr_dev->dip_list); 4696 4697 out: 4698 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); 4699 return ret; 4700 } 4701 4702 enum { 4703 CONG_DCQCN, 4704 CONG_WINDOW, 4705 }; 4706 4707 enum { 4708 UNSUPPORT_CONG_LEVEL, 4709 SUPPORT_CONG_LEVEL, 4710 }; 4711 4712 enum { 4713 CONG_LDCP, 4714 CONG_HC3, 4715 }; 4716 4717 enum { 4718 DIP_INVALID, 4719 DIP_VALID, 4720 }; 4721 4722 enum { 4723 WND_LIMIT, 4724 WND_UNLIMIT, 4725 }; 4726 4727 static int check_cong_type(struct ib_qp *ibqp, 4728 struct hns_roce_congestion_algorithm *cong_alg) 4729 { 4730 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4731 4732 if (ibqp->qp_type == IB_QPT_UD) 4733 hr_dev->caps.cong_type = CONG_TYPE_DCQCN; 4734 4735 /* different congestion types match different configurations */ 4736 switch (hr_dev->caps.cong_type) { 4737 case CONG_TYPE_DCQCN: 4738 cong_alg->alg_sel = CONG_DCQCN; 4739 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; 4740 cong_alg->dip_vld = DIP_INVALID; 4741 cong_alg->wnd_mode_sel = WND_LIMIT; 4742 break; 4743 case CONG_TYPE_LDCP: 4744 cong_alg->alg_sel = CONG_WINDOW; 4745 cong_alg->alg_sub_sel = CONG_LDCP; 4746 cong_alg->dip_vld = DIP_INVALID; 4747 cong_alg->wnd_mode_sel = WND_UNLIMIT; 4748 break; 4749 case CONG_TYPE_HC3: 4750 cong_alg->alg_sel = CONG_WINDOW; 4751 cong_alg->alg_sub_sel = CONG_HC3; 4752 cong_alg->dip_vld = DIP_INVALID; 4753 cong_alg->wnd_mode_sel = WND_LIMIT; 4754 break; 4755 case CONG_TYPE_DIP: 4756 cong_alg->alg_sel = CONG_DCQCN; 4757 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; 4758 cong_alg->dip_vld = DIP_VALID; 4759 cong_alg->wnd_mode_sel = WND_LIMIT; 4760 break; 4761 default: 4762 ibdev_warn(&hr_dev->ib_dev, 4763 "invalid type(%u) for congestion selection.\n", 4764 hr_dev->caps.cong_type); 4765 hr_dev->caps.cong_type = CONG_TYPE_DCQCN; 4766 cong_alg->alg_sel = CONG_DCQCN; 4767 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; 4768 cong_alg->dip_vld = DIP_INVALID; 4769 cong_alg->wnd_mode_sel = WND_LIMIT; 4770 break; 4771 } 4772 4773 return 0; 4774 } 4775 4776 static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 4777 struct hns_roce_v2_qp_context *context, 4778 struct hns_roce_v2_qp_context *qpc_mask) 4779 { 4780 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 4781 struct hns_roce_congestion_algorithm cong_field; 4782 struct ib_device *ibdev = ibqp->device; 4783 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); 4784 u32 dip_idx = 0; 4785 int ret; 4786 4787 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 || 4788 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE) 4789 return 0; 4790 4791 ret = check_cong_type(ibqp, &cong_field); 4792 if (ret) 4793 return ret; 4794 4795 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id + 4796 hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE); 4797 hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID); 4798 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel); 4799 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL); 4800 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL, 4801 cong_field.alg_sub_sel); 4802 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL); 4803 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld); 4804 hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD); 4805 hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN, 4806 cong_field.wnd_mode_sel); 4807 hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN); 4808 4809 /* if dip is disabled, there is no need to set dip idx */ 4810 if 
(cong_field.dip_vld == 0) 4811 return 0; 4812 4813 ret = get_dip_ctx_idx(ibqp, attr, &dip_idx); 4814 if (ret) { 4815 ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret); 4816 return ret; 4817 } 4818 4819 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx); 4820 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0); 4821 4822 return 0; 4823 } 4824 4825 static int hns_roce_v2_set_path(struct ib_qp *ibqp, 4826 const struct ib_qp_attr *attr, 4827 int attr_mask, 4828 struct hns_roce_v2_qp_context *context, 4829 struct hns_roce_v2_qp_context *qpc_mask) 4830 { 4831 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 4832 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4833 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4834 struct ib_device *ibdev = &hr_dev->ib_dev; 4835 const struct ib_gid_attr *gid_attr = NULL; 4836 u8 sl = rdma_ah_get_sl(&attr->ah_attr); 4837 int is_roce_protocol; 4838 u16 vlan_id = 0xffff; 4839 bool is_udp = false; 4840 u32 max_sl; 4841 u8 ib_port; 4842 u8 hr_port; 4843 int ret; 4844 4845 max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1); 4846 if (unlikely(sl > max_sl)) { 4847 ibdev_err_ratelimited(ibdev, 4848 "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n", 4849 sl, max_sl); 4850 return -EINVAL; 4851 } 4852 4853 /* 4854 * If free_mr_en of qp is set, it means that this qp comes from 4855 * free mr. This qp will perform the loopback operation. 4856 * In the loopback scenario, only sl needs to be set. 4857 */ 4858 if (hr_qp->free_mr_en) { 4859 hr_reg_write(context, QPC_SL, sl); 4860 hr_reg_clear(qpc_mask, QPC_SL); 4861 hr_qp->sl = sl; 4862 return 0; 4863 } 4864 4865 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1; 4866 hr_port = ib_port - 1; 4867 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && 4868 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; 4869 4870 if (is_roce_protocol) { 4871 gid_attr = attr->ah_attr.grh.sgid_attr; 4872 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); 4873 if (ret) 4874 return ret; 4875 4876 is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); 4877 } 4878 4879 /* Only HIP08 needs to set the vlan_en bits in QPC */ 4880 if (vlan_id < VLAN_N_VID && 4881 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 4882 hr_reg_enable(context, QPC_RQ_VLAN_EN); 4883 hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN); 4884 hr_reg_enable(context, QPC_SQ_VLAN_EN); 4885 hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN); 4886 } 4887 4888 hr_reg_write(context, QPC_VLAN_ID, vlan_id); 4889 hr_reg_clear(qpc_mask, QPC_VLAN_ID); 4890 4891 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { 4892 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n", 4893 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); 4894 return -EINVAL; 4895 } 4896 4897 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { 4898 ibdev_err(ibdev, "ah attr is not RDMA roce type\n"); 4899 return -EINVAL; 4900 } 4901 4902 hr_reg_write(context, QPC_UDPSPN, 4903 is_udp ? 
rdma_get_udp_sport(grh->flow_label, ibqp->qp_num, 4904 attr->dest_qp_num) : 4905 0); 4906 4907 hr_reg_clear(qpc_mask, QPC_UDPSPN); 4908 4909 hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index); 4910 4911 hr_reg_clear(qpc_mask, QPC_GMV_IDX); 4912 4913 hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit); 4914 hr_reg_clear(qpc_mask, QPC_HOPLIMIT); 4915 4916 ret = fill_cong_field(ibqp, attr, context, qpc_mask); 4917 if (ret) 4918 return ret; 4919 4920 hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh)); 4921 hr_reg_clear(qpc_mask, QPC_TC); 4922 4923 hr_reg_write(context, QPC_FL, grh->flow_label); 4924 hr_reg_clear(qpc_mask, QPC_FL); 4925 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); 4926 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); 4927 4928 hr_qp->sl = sl; 4929 hr_reg_write(context, QPC_SL, hr_qp->sl); 4930 hr_reg_clear(qpc_mask, QPC_SL); 4931 4932 return 0; 4933 } 4934 4935 static bool check_qp_state(enum ib_qp_state cur_state, 4936 enum ib_qp_state new_state) 4937 { 4938 static const bool sm[][IB_QPS_ERR + 1] = { 4939 [IB_QPS_RESET] = { [IB_QPS_RESET] = true, 4940 [IB_QPS_INIT] = true }, 4941 [IB_QPS_INIT] = { [IB_QPS_RESET] = true, 4942 [IB_QPS_INIT] = true, 4943 [IB_QPS_RTR] = true, 4944 [IB_QPS_ERR] = true }, 4945 [IB_QPS_RTR] = { [IB_QPS_RESET] = true, 4946 [IB_QPS_RTS] = true, 4947 [IB_QPS_ERR] = true }, 4948 [IB_QPS_RTS] = { [IB_QPS_RESET] = true, 4949 [IB_QPS_RTS] = true, 4950 [IB_QPS_ERR] = true }, 4951 [IB_QPS_SQD] = {}, 4952 [IB_QPS_SQE] = {}, 4953 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, 4954 [IB_QPS_ERR] = true } 4955 }; 4956 4957 return sm[cur_state][new_state]; 4958 } 4959 4960 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, 4961 const struct ib_qp_attr *attr, 4962 int attr_mask, 4963 enum ib_qp_state cur_state, 4964 enum ib_qp_state new_state, 4965 struct hns_roce_v2_qp_context *context, 4966 struct hns_roce_v2_qp_context *qpc_mask, 4967 struct ib_udata *udata) 4968 { 4969 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4970 int ret = 0; 4971 4972 if (!check_qp_state(cur_state, new_state)) { 4973 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n"); 4974 return -EINVAL; 4975 } 4976 4977 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 4978 memset(qpc_mask, 0, hr_dev->caps.qpc_sz); 4979 modify_qp_reset_to_init(ibqp, attr, context, qpc_mask); 4980 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { 4981 modify_qp_init_to_init(ibqp, attr, context, qpc_mask); 4982 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 4983 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context, 4984 qpc_mask, udata); 4985 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { 4986 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context, 4987 qpc_mask); 4988 } 4989 4990 return ret; 4991 } 4992 4993 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) 4994 { 4995 #define QP_ACK_TIMEOUT_MAX_HIP08 20 4996 #define QP_ACK_TIMEOUT_MAX 31 4997 4998 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 4999 if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) { 5000 ibdev_warn(&hr_dev->ib_dev, 5001 "local ACK timeout shall be 0 to 20.\n"); 5002 return false; 5003 } 5004 *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; 5005 } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { 5006 if (*timeout > QP_ACK_TIMEOUT_MAX) { 5007 ibdev_warn(&hr_dev->ib_dev, 5008 "local ACK timeout shall be 0 to 31.\n"); 5009 return false; 5010 } 5011 } 5012 5013 return true; 5014 } 5015 5016 static 
int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, 5017 const struct ib_qp_attr *attr, 5018 int attr_mask, 5019 struct hns_roce_v2_qp_context *context, 5020 struct hns_roce_v2_qp_context *qpc_mask) 5021 { 5022 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5023 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5024 int ret = 0; 5025 u8 timeout; 5026 5027 if (attr_mask & IB_QP_AV) { 5028 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context, 5029 qpc_mask); 5030 if (ret) 5031 return ret; 5032 } 5033 5034 if (attr_mask & IB_QP_TIMEOUT) { 5035 timeout = attr->timeout; 5036 if (check_qp_timeout_cfg_range(hr_dev, &timeout)) { 5037 hr_reg_write(context, QPC_AT, timeout); 5038 hr_reg_clear(qpc_mask, QPC_AT); 5039 } 5040 } 5041 5042 if (attr_mask & IB_QP_RETRY_CNT) { 5043 hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt); 5044 hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT); 5045 5046 hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt); 5047 hr_reg_clear(qpc_mask, QPC_RETRY_CNT); 5048 } 5049 5050 if (attr_mask & IB_QP_RNR_RETRY) { 5051 hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry); 5052 hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT); 5053 5054 hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry); 5055 hr_reg_clear(qpc_mask, QPC_RNR_CNT); 5056 } 5057 5058 if (attr_mask & IB_QP_SQ_PSN) { 5059 hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn); 5060 hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN); 5061 5062 hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn); 5063 hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN); 5064 5065 hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn); 5066 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L); 5067 5068 hr_reg_write(context, QPC_RETRY_MSG_PSN_H, 5069 attr->sq_psn >> RETRY_MSG_PSN_SHIFT); 5070 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H); 5071 5072 hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn); 5073 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN); 5074 5075 hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn); 5076 hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN); 5077 } 5078 5079 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && 5080 attr->max_dest_rd_atomic) { 5081 hr_reg_write(context, QPC_RR_MAX, 5082 fls(attr->max_dest_rd_atomic - 1)); 5083 hr_reg_clear(qpc_mask, QPC_RR_MAX); 5084 } 5085 5086 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { 5087 hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1)); 5088 hr_reg_clear(qpc_mask, QPC_SR_MAX); 5089 } 5090 5091 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) 5092 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask); 5093 5094 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 5095 hr_reg_write(context, QPC_MIN_RNR_TIME, 5096 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ? 
5097 HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer); 5098 hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME); 5099 } 5100 5101 if (attr_mask & IB_QP_RQ_PSN) { 5102 hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn); 5103 hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN); 5104 5105 hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1); 5106 hr_reg_clear(qpc_mask, QPC_RAQ_PSN); 5107 } 5108 5109 if (attr_mask & IB_QP_QKEY) { 5110 context->qkey_xrcd = cpu_to_le32(attr->qkey); 5111 qpc_mask->qkey_xrcd = 0; 5112 hr_qp->qkey = attr->qkey; 5113 } 5114 5115 return ret; 5116 } 5117 5118 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp, 5119 const struct ib_qp_attr *attr, 5120 int attr_mask) 5121 { 5122 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5123 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5124 5125 if (attr_mask & IB_QP_ACCESS_FLAGS) 5126 hr_qp->atomic_rd_en = attr->qp_access_flags; 5127 5128 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 5129 hr_qp->resp_depth = attr->max_dest_rd_atomic; 5130 if (attr_mask & IB_QP_PORT) { 5131 hr_qp->port = attr->port_num - 1; 5132 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; 5133 } 5134 } 5135 5136 static void clear_qp(struct hns_roce_qp *hr_qp) 5137 { 5138 struct ib_qp *ibqp = &hr_qp->ibqp; 5139 5140 if (ibqp->send_cq) 5141 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), 5142 hr_qp->qpn, NULL); 5143 5144 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq) 5145 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), 5146 hr_qp->qpn, ibqp->srq ? 5147 to_hr_srq(ibqp->srq) : NULL); 5148 5149 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 5150 *hr_qp->rdb.db_record = 0; 5151 5152 hr_qp->rq.head = 0; 5153 hr_qp->rq.tail = 0; 5154 hr_qp->sq.head = 0; 5155 hr_qp->sq.tail = 0; 5156 hr_qp->next_sge = 0; 5157 } 5158 5159 static void v2_set_flushed_fields(struct ib_qp *ibqp, 5160 struct hns_roce_v2_qp_context *context, 5161 struct hns_roce_v2_qp_context *qpc_mask) 5162 { 5163 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5164 unsigned long sq_flag = 0; 5165 unsigned long rq_flag = 0; 5166 5167 if (ibqp->qp_type == IB_QPT_XRC_TGT) 5168 return; 5169 5170 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag); 5171 hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head); 5172 hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX); 5173 hr_qp->state = IB_QPS_ERR; 5174 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag); 5175 5176 if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */ 5177 return; 5178 5179 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag); 5180 hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head); 5181 hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX); 5182 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag); 5183 } 5184 5185 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, 5186 const struct ib_qp_attr *attr, 5187 int attr_mask, enum ib_qp_state cur_state, 5188 enum ib_qp_state new_state, struct ib_udata *udata) 5189 { 5190 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5191 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5192 struct hns_roce_v2_qp_context ctx[2]; 5193 struct hns_roce_v2_qp_context *context = ctx; 5194 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1; 5195 struct ib_device *ibdev = &hr_dev->ib_dev; 5196 int ret; 5197 5198 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 5199 return -EOPNOTSUPP; 5200 5201 /* 5202 * In v2 engine, software pass context and context mask to hardware 5203 * when modifying qp. 
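	 *
	 * A compact picture of what finally reaches the mailbox (an
	 * interpretation of hns_roce_v2_qp_modify() above, not a hardware
	 * spec):
	 *
	 *   mailbox->buf          : context   - desired field values
	 *   mailbox->buf + qpc_sz : qpc_mask  - 0-bits select the fields
	 *                                       hardware should apply
	 *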
If software needs to modify some fields in the
5204 	 * context, it should clear all bits of those fields in the context
5205 	 * mask; all other mask bits are left at 0x1.
5206 	 */
5207 	memset(context, 0, hr_dev->caps.qpc_sz);
5208 	memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
5209 
5210 	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
5211 					 new_state, context, qpc_mask, udata);
5212 	if (ret)
5213 		goto out;
5214 
5215 	/* When the QP state becomes ERR, the SQ and RQ WQEs should be flushed */
5216 	if (new_state == IB_QPS_ERR)
5217 		v2_set_flushed_fields(ibqp, context, qpc_mask);
5218 
5219 	/* Configure the optional fields */
5220 	ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
5221 					 qpc_mask);
5222 	if (ret)
5223 		goto out;
5224 
5225 	hr_reg_write_bool(context, QPC_INV_CREDIT,
5226 			  to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
5227 			  ibqp->srq);
5228 	hr_reg_clear(qpc_mask, QPC_INV_CREDIT);
5229 
5230 	/* Every state transition must update the QP state field */
5231 	hr_reg_write(context, QPC_QP_ST, new_state);
5232 	hr_reg_clear(qpc_mask, QPC_QP_ST);
5233 
5234 	/* SW passes the context to HW */
5235 	ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
5236 	if (ret) {
5237 		ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
5238 		goto out;
5239 	}
5240 
5241 	hr_qp->state = new_state;
5242 
5243 	hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
5244 
5245 	if (new_state == IB_QPS_RESET && !ibqp->uobject)
5246 		clear_qp(hr_qp);
5247 
5248 out:
5249 	return ret;
5250 }
5251 
5252 static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
5253 {
5254 	static const enum ib_qp_state map[] = {
5255 		[HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
5256 		[HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
5257 		[HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
5258 		[HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
5259 		[HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
5260 		[HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
5261 		[HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
5262 		[HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
5263 	};
5264 
5265 	return (state < ARRAY_SIZE(map)) ?
map[state] : -1; 5266 } 5267 5268 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn, 5269 void *buffer) 5270 { 5271 struct hns_roce_cmd_mailbox *mailbox; 5272 int ret; 5273 5274 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 5275 if (IS_ERR(mailbox)) 5276 return PTR_ERR(mailbox); 5277 5278 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC, 5279 qpn); 5280 if (ret) 5281 goto out; 5282 5283 memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz); 5284 5285 out: 5286 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 5287 return ret; 5288 } 5289 5290 static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev, 5291 struct hns_roce_v2_qp_context *context) 5292 { 5293 u8 timeout; 5294 5295 timeout = (u8)hr_reg_read(context, QPC_AT); 5296 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) 5297 timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; 5298 5299 return timeout; 5300 } 5301 5302 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 5303 int qp_attr_mask, 5304 struct ib_qp_init_attr *qp_init_attr) 5305 { 5306 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5307 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5308 struct hns_roce_v2_qp_context context = {}; 5309 struct ib_device *ibdev = &hr_dev->ib_dev; 5310 int tmp_qp_state; 5311 int state; 5312 int ret; 5313 5314 memset(qp_attr, 0, sizeof(*qp_attr)); 5315 memset(qp_init_attr, 0, sizeof(*qp_init_attr)); 5316 5317 mutex_lock(&hr_qp->mutex); 5318 5319 if (hr_qp->state == IB_QPS_RESET) { 5320 qp_attr->qp_state = IB_QPS_RESET; 5321 ret = 0; 5322 goto done; 5323 } 5324 5325 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context); 5326 if (ret) { 5327 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret); 5328 ret = -EINVAL; 5329 goto out; 5330 } 5331 5332 state = hr_reg_read(&context, QPC_QP_ST); 5333 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); 5334 if (tmp_qp_state == -1) { 5335 ibdev_err(ibdev, "Illegal ib_qp_state\n"); 5336 ret = -EINVAL; 5337 goto out; 5338 } 5339 hr_qp->state = (u8)tmp_qp_state; 5340 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; 5341 qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU); 5342 qp_attr->path_mig_state = IB_MIG_ARMED; 5343 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 5344 if (hr_qp->ibqp.qp_type == IB_QPT_UD) 5345 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd); 5346 5347 qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN); 5348 qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN); 5349 qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN); 5350 qp_attr->qp_access_flags = 5351 ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) | 5352 ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) | 5353 ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S); 5354 5355 if (hr_qp->ibqp.qp_type == IB_QPT_RC || 5356 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || 5357 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) { 5358 struct ib_global_route *grh = 5359 rdma_ah_retrieve_grh(&qp_attr->ah_attr); 5360 5361 rdma_ah_set_sl(&qp_attr->ah_attr, 5362 hr_reg_read(&context, QPC_SL)); 5363 rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1); 5364 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); 5365 grh->flow_label = hr_reg_read(&context, QPC_FL); 5366 grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX); 5367 grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT); 5368 grh->traffic_class = hr_reg_read(&context, QPC_TC); 5369 5370 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw)); 5371 } 5372 5373 qp_attr->port_num = hr_qp->port 
+ 1; 5374 qp_attr->sq_draining = 0; 5375 qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX); 5376 qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX); 5377 5378 qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME); 5379 qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context); 5380 qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT); 5381 qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT); 5382 5383 done: 5384 qp_attr->cur_qp_state = qp_attr->qp_state; 5385 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; 5386 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; 5387 qp_attr->cap.max_inline_data = hr_qp->max_inline_data; 5388 5389 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; 5390 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; 5391 5392 qp_init_attr->qp_context = ibqp->qp_context; 5393 qp_init_attr->qp_type = ibqp->qp_type; 5394 qp_init_attr->recv_cq = ibqp->recv_cq; 5395 qp_init_attr->send_cq = ibqp->send_cq; 5396 qp_init_attr->srq = ibqp->srq; 5397 qp_init_attr->cap = qp_attr->cap; 5398 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits; 5399 5400 out: 5401 mutex_unlock(&hr_qp->mutex); 5402 return ret; 5403 } 5404 5405 static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp) 5406 { 5407 return ((hr_qp->ibqp.qp_type == IB_QPT_RC || 5408 hr_qp->ibqp.qp_type == IB_QPT_UD || 5409 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || 5410 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) && 5411 hr_qp->state != IB_QPS_RESET); 5412 } 5413 5414 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, 5415 struct hns_roce_qp *hr_qp, 5416 struct ib_udata *udata) 5417 { 5418 struct ib_device *ibdev = &hr_dev->ib_dev; 5419 struct hns_roce_cq *send_cq, *recv_cq; 5420 unsigned long flags; 5421 int ret = 0; 5422 5423 if (modify_qp_is_ok(hr_qp)) { 5424 /* Modify qp to reset before destroying qp */ 5425 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, 5426 hr_qp->state, IB_QPS_RESET, udata); 5427 if (ret) 5428 ibdev_err(ibdev, 5429 "failed to modify QP to RST, ret = %d.\n", 5430 ret); 5431 } 5432 5433 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; 5434 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; 5435 5436 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); 5437 hns_roce_lock_cqs(send_cq, recv_cq); 5438 5439 if (!udata) { 5440 if (recv_cq) 5441 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, 5442 (hr_qp->ibqp.srq ? 
5443 to_hr_srq(hr_qp->ibqp.srq) : 5444 NULL)); 5445 5446 if (send_cq && send_cq != recv_cq) 5447 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL); 5448 } 5449 5450 hns_roce_qp_remove(hr_dev, hr_qp); 5451 5452 hns_roce_unlock_cqs(send_cq, recv_cq); 5453 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); 5454 5455 return ret; 5456 } 5457 5458 int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 5459 { 5460 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5461 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5462 int ret; 5463 5464 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); 5465 if (ret) 5466 ibdev_err(&hr_dev->ib_dev, 5467 "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n", 5468 hr_qp->qpn, ret); 5469 5470 hns_roce_qp_destroy(hr_dev, hr_qp, udata); 5471 5472 return 0; 5473 } 5474 5475 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, 5476 struct hns_roce_qp *hr_qp) 5477 { 5478 struct ib_device *ibdev = &hr_dev->ib_dev; 5479 struct hns_roce_sccc_clr_done *resp; 5480 struct hns_roce_sccc_clr *clr; 5481 struct hns_roce_cmq_desc desc; 5482 int ret, i; 5483 5484 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) 5485 return 0; 5486 5487 mutex_lock(&hr_dev->qp_table.scc_mutex); 5488 5489 /* set scc ctx clear done flag */ 5490 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false); 5491 ret = hns_roce_cmq_send(hr_dev, &desc, 1); 5492 if (ret) { 5493 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret); 5494 goto out; 5495 } 5496 5497 /* clear scc context */ 5498 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false); 5499 clr = (struct hns_roce_sccc_clr *)desc.data; 5500 clr->qpn = cpu_to_le32(hr_qp->qpn); 5501 ret = hns_roce_cmq_send(hr_dev, &desc, 1); 5502 if (ret) { 5503 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret); 5504 goto out; 5505 } 5506 5507 /* query scc context clear is done or not */ 5508 resp = (struct hns_roce_sccc_clr_done *)desc.data; 5509 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) { 5510 hns_roce_cmq_setup_basic_desc(&desc, 5511 HNS_ROCE_OPC_QUERY_SCCC, true); 5512 ret = hns_roce_cmq_send(hr_dev, &desc, 1); 5513 if (ret) { 5514 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n", 5515 ret); 5516 goto out; 5517 } 5518 5519 if (resp->clr_done) 5520 goto out; 5521 5522 msleep(20); 5523 } 5524 5525 ibdev_err(ibdev, "query SCC clr done flag overtime.\n"); 5526 ret = -ETIMEDOUT; 5527 5528 out: 5529 mutex_unlock(&hr_dev->qp_table.scc_mutex); 5530 return ret; 5531 } 5532 5533 #define DMA_IDX_SHIFT 3 5534 #define DMA_WQE_SHIFT 3 5535 5536 static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq, 5537 struct hns_roce_srq_context *ctx) 5538 { 5539 struct hns_roce_idx_que *idx_que = &srq->idx_que; 5540 struct ib_device *ibdev = srq->ibsrq.device; 5541 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); 5542 u64 mtts_idx[MTT_MIN_COUNT] = {}; 5543 dma_addr_t dma_handle_idx = 0; 5544 int ret; 5545 5546 /* Get physical address of idx que buf */ 5547 ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx, 5548 ARRAY_SIZE(mtts_idx), &dma_handle_idx); 5549 if (ret < 1) { 5550 ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n", 5551 ret); 5552 return -ENOBUFS; 5553 } 5554 5555 hr_reg_write(ctx, SRQC_IDX_HOP_NUM, 5556 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt)); 5557 5558 hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT); 5559 hr_reg_write(ctx, SRQC_IDX_BT_BA_H, 5560 upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT)); 
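	/*
	 * The BT base address appears to be kept in 8-byte units (hence
	 * DMA_IDX_SHIFT == 3): dma_handle_idx is shifted right before being
	 * split into the low/high fields above, mirroring the QPC and CQC
	 * base-address handling earlier in this file.
	 */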
5561 5562 hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ, 5563 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift)); 5564 hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ, 5565 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift)); 5566 5567 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L, 5568 to_hr_hw_page_addr(mtts_idx[0])); 5569 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H, 5570 upper_32_bits(to_hr_hw_page_addr(mtts_idx[0]))); 5571 5572 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L, 5573 to_hr_hw_page_addr(mtts_idx[1])); 5574 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H, 5575 upper_32_bits(to_hr_hw_page_addr(mtts_idx[1]))); 5576 5577 return 0; 5578 } 5579 5580 static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) 5581 { 5582 struct ib_device *ibdev = srq->ibsrq.device; 5583 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); 5584 struct hns_roce_srq_context *ctx = mb_buf; 5585 u64 mtts_wqe[MTT_MIN_COUNT] = {}; 5586 dma_addr_t dma_handle_wqe = 0; 5587 int ret; 5588 5589 memset(ctx, 0, sizeof(*ctx)); 5590 5591 /* Get the physical address of srq buf */ 5592 ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe, 5593 ARRAY_SIZE(mtts_wqe), &dma_handle_wqe); 5594 if (ret < 1) { 5595 ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n", 5596 ret); 5597 return -ENOBUFS; 5598 } 5599 5600 hr_reg_write(ctx, SRQC_SRQ_ST, 1); 5601 hr_reg_write_bool(ctx, SRQC_SRQ_TYPE, 5602 srq->ibsrq.srq_type == IB_SRQT_XRC); 5603 hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn); 5604 hr_reg_write(ctx, SRQC_SRQN, srq->srqn); 5605 hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn); 5606 hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn); 5607 hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt)); 5608 hr_reg_write(ctx, SRQC_RQWS, 5609 srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1)); 5610 5611 hr_reg_write(ctx, SRQC_WQE_HOP_NUM, 5612 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num, 5613 srq->wqe_cnt)); 5614 5615 hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT); 5616 hr_reg_write(ctx, SRQC_WQE_BT_BA_H, 5617 upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT)); 5618 5619 hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ, 5620 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift)); 5621 hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ, 5622 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift)); 5623 5624 return hns_roce_v2_write_srqc_index_queue(srq, ctx); 5625 } 5626 5627 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, 5628 struct ib_srq_attr *srq_attr, 5629 enum ib_srq_attr_mask srq_attr_mask, 5630 struct ib_udata *udata) 5631 { 5632 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); 5633 struct hns_roce_srq *srq = to_hr_srq(ibsrq); 5634 struct hns_roce_srq_context *srq_context; 5635 struct hns_roce_srq_context *srqc_mask; 5636 struct hns_roce_cmd_mailbox *mailbox; 5637 int ret; 5638 5639 /* Resizing SRQs is not supported yet */ 5640 if (srq_attr_mask & IB_SRQ_MAX_WR) 5641 return -EOPNOTSUPP; 5642 5643 if (srq_attr_mask & IB_SRQ_LIMIT) { 5644 if (srq_attr->srq_limit > srq->wqe_cnt) 5645 return -EINVAL; 5646 5647 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 5648 if (IS_ERR(mailbox)) 5649 return PTR_ERR(mailbox); 5650 5651 srq_context = mailbox->buf; 5652 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1; 5653 5654 memset(srqc_mask, 0xff, sizeof(*srqc_mask)); 5655 5656 hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit); 5657 hr_reg_clear(srqc_mask, SRQC_LIMIT_WL); 5658 5659 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, 5660 HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn); 5661 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 5662 
static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
				HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd of querying SRQ, ret = %d.\n",
			  ret);
		goto out;
	}

	attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
	attr->max_wr = srq->wqe_cnt;
	attr->max_sge = srq->max_gs - srq->rsv_sge;

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
	struct hns_roce_v2_cq_context *cq_context;
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	struct hns_roce_v2_cq_context *cqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;

	memset(cqc_mask, 0xff, sizeof(*cqc_mask));

	hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
	hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev,
				 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
				 cq_period);
			cq_period = HNS_ROCE_MAX_CQ_PERIOD;
		}
		cq_period *= HNS_ROCE_CLOCK_ADJUST;
	}
	hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
	hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
				HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when modifying CQ, ret = %d.\n",
			  ret);

	return ret;
}

static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
				 void *buffer)
{
	struct hns_roce_v2_cq_context *context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
				HNS_ROCE_CMD_QUERY_CQC, cqn);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when querying CQ, ret = %d.\n",
			  ret);
		goto err_mailbox;
	}

	memcpy(buffer, context, sizeof(*context));

err_mailbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
				 void *buffer)
{
	struct hns_roce_v2_mpt_entry *context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
				key_to_hw_index(key));
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when querying MPT, ret = %d.\n",
			  ret);
		goto err_mailbox;
	}

	memcpy(buffer, context, sizeof(*context));

err_mailbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
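
/*
 * Asynchronous events are logged from a workqueue item rather than
 * directly in interrupt context; each work item queued by
 * hns_roce_v2_init_irq_work() below is printed here and then freed.
 */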
static void hns_roce_irq_work_handle(struct work_struct *work)
{
	struct hns_roce_work *irq_work =
		container_of(work, struct hns_roce_work, work);
	struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;

	switch (irq_work->event_type) {
	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		ibdev_info(ibdev, "path migration succeeded.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		ibdev_warn(ibdev, "path migration failed.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		ibdev_dbg(ibdev, "send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		ibdev_err(ibdev, "local work queue 0x%x catastrophic error, sub_event type is: %d\n",
			  irq_work->queue_num, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
			  irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
			  irq_work->queue_num, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		ibdev_dbg(ibdev, "SRQ limit reached.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		ibdev_dbg(ibdev, "SRQ last wqe reached.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		ibdev_err(ibdev, "SRQ catastrophic error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
		break;
	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		ibdev_warn(ibdev, "DB overflow.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_FLR:
		ibdev_warn(ibdev, "function level reset.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		ibdev_err(ibdev, "xrc domain violation error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
		ibdev_err(ibdev, "invalid xrceth error.\n");
		break;
	default:
		break;
	}

	kfree(irq_work);
}

static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
				      struct hns_roce_eq *eq, u32 queue_num)
{
	struct hns_roce_work *irq_work;

	irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
	if (!irq_work)
		return;

	INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
	irq_work->hr_dev = hr_dev;
	irq_work->event_type = eq->event_type;
	irq_work->sub_type = eq->sub_type;
	irq_work->queue_num = queue_num;
	queue_work(hr_dev->irq_workq, &irq_work->work);
}
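
/*
 * Ring the EQ doorbell: publish the new consumer index to hardware and
 * re-arm the EQ according to its arm state. AEQs and CEQs use distinct
 * doorbell commands, and CEQs additionally tag the doorbell with their
 * EQ number.
 */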
static void update_eq_db(struct hns_roce_eq *eq)
{
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	struct hns_roce_v2_db eq_db = {};

	if (eq->type_flag == HNS_ROCE_AEQ) {
		hr_reg_write(&eq_db, EQ_DB_CMD,
			     eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			     HNS_ROCE_EQ_DB_CMD_AEQ :
			     HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
	} else {
		hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);

		hr_reg_write(&eq_db, EQ_DB_CMD,
			     eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			     HNS_ROCE_EQ_DB_CMD_CEQ :
			     HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
	}

	hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);

	hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
}

/*
 * Entry validity check: the low bits of cons_index select the EQE
 * within the ring, while bit log2(entries) gives the parity of the
 * current pass over the ring. An EQE whose owner bit equals that
 * parity is stale (left over from the previous pass) and is treated
 * as "no entry".
 */
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (hr_reg_read(aeqe, AEQE_OWNER) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
				       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
	irqreturn_t aeqe_found = IRQ_NONE;
	int event_type;
	u32 queue_num;
	int sub_type;

	while (aeqe) {
		/* Make sure we read AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE);
		sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE);
		queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			hns_roce_qp_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, queue_num, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = IRQ_HANDLED;

		hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);

		aeqe = next_aeqe_sw_v2(eq);
	}

	update_eq_db(eq);

	return IRQ_RETVAL(aeqe_found);
}

static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

	return (hr_reg_read(ceqe, CEQE_OWNER) ^
		!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
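
/*
 * Completion EQ handler: each valid CEQE carries the number of a CQ
 * with new completions; hand that CQ to the completion dispatcher and
 * advance the consumer index.
 */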
static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
				       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
	irqreturn_t ceqe_found = IRQ_NONE;
	u32 cqn;

	while (ceqe) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = hr_reg_read(ceqe, CEQE_CQN);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqe_found = IRQ_HANDLED;

		ceqe = next_ceqe_sw_v2(eq);
	}

	update_eq_db(eq);

	return IRQ_RETVAL(ceqe_found);
}

static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	irqreturn_t int_work;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* Completion event interrupt */
		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
	else
		/* Asynchronous event interrupt */
		int_work = hns_roce_v2_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
					    u32 int_st)
{
	struct pci_dev *pdev = hr_dev->pci_dev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	const struct hnae3_ae_ops *ops = ae_dev->ops;
	irqreturn_t int_work = IRQ_NONE;
	u32 int_en;

	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		dev_err(hr_dev->dev, "AEQ overflow!\n");

		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
			   1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);

		/* Set reset level for reset_event() */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = IRQ_HANDLED;
	} else {
		dev_err(hr_dev->dev, "no basic abnormal irq found.\n");
	}

	return IRQ_RETVAL(int_work);
}

static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
			      struct fmea_ram_ecc *ecc_info)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
	ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
	ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);

	return 0;
}

static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	u32 addr_upper;
	u32 addr_low;
	int ret;

	/* Read back the current GMV base address table entry... */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
	hr_reg_write(req, CFG_GMV_BT_IDX, idx);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev,
			"failed to execute cmd to read gmv, ret = %d.\n", ret);
		return ret;
	}

	addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
	addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);

	/* ...then write the same address back to refresh the entry. */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
	hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
	hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
	hr_reg_write(req, CFG_GMV_BT_IDX, idx);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
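
/*
 * The QPC/CQC timer and SCCC resources store the BT0 base address
 * directly, while the remaining resource types store a page number
 * that must be shifted into a byte address before being written back.
 */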
static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
{
	if (res_type == ECC_RESOURCE_QPC_TIMER ||
	    res_type == ECC_RESOURCE_CQC_TIMER ||
	    res_type == ECC_RESOURCE_SCCC)
		return le64_to_cpu(*data);

	return le64_to_cpu(*data) << PAGE_SHIFT;
}

static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
			       u32 index)
{
	u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
	u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 addr;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
	if (ret) {
		dev_err(hr_dev->dev,
			"failed to execute cmd to read fmea ram, ret = %d.\n",
			ret);
		goto out;
	}

	addr = fmea_get_ram_res_addr(res_type, mailbox->buf);

	ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
	if (ret)
		dev_err(hr_dev->dev,
			"failed to execute cmd to write fmea ram, ret = %d.\n",
			ret);

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
				 struct fmea_ram_ecc *ecc_info)
{
	u32 res_type = ecc_info->res_type;
	u32 index = ecc_info->index;
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);

	if (res_type >= ECC_RESOURCE_COUNT) {
		dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
			res_type);
		return;
	}

	if (res_type == ECC_RESOURCE_GMV)
		ret = fmea_recover_gmv(hr_dev, index);
	else
		ret = fmea_recover_others(hr_dev, res_type, index);
	if (ret)
		dev_err(hr_dev->dev,
			"failed to recover %s, index = %u, ret = %d.\n",
			fmea_ram_res[res_type].name, index, ret);
}

static void fmea_ram_ecc_work(struct work_struct *ecc_work)
{
	struct hns_roce_dev *hr_dev =
		container_of(ecc_work, struct hns_roce_dev, ecc_work);
	struct fmea_ram_ecc ecc_info = {};

	if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
		dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
		return;
	}

	if (!ecc_info.is_ecc_err) {
		dev_err(hr_dev->dev, "there is no fmea ram ecc err found.\n");
		return;
	}

	fmea_ram_ecc_recover(hr_dev, &ecc_info);
}

static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	irqreturn_t int_work = IRQ_NONE;
	u32 int_st;

	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);

	if (int_st) {
		int_work = abnormal_interrupt_basic(hr_dev, int_st);
	} else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
		int_work = IRQ_HANDLED;
	} else {
		dev_err(hr_dev->dev, "there is no abnormal irq found.\n");
	}

	return IRQ_RETVAL(int_work);
}

static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
					int eq_num, u32 enable_flag)
{
	int i;

	for (i = 0; i < eq_num; i++)
		roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
			   i * EQ_REG_OFFSET, enable_flag);

	roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
	roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}
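
/*
 * EQ numbers are allocated with the completion vectors first, so an
 * eqn below num_comp_vectors identifies a CEQ and anything above it
 * an AEQ.
 */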
static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
{
	struct device *dev = hr_dev->dev;
	int ret;
	u8 cmd;

	if (eqn < hr_dev->caps.num_comp_vectors)
		cmd = HNS_ROCE_CMD_DESTROY_CEQC;
	else
		cmd = HNS_ROCE_CMD_DESTROY_AEQC;

	ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
	if (ret)
		dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
}

static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}

static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->shift = ilog2((unsigned int)eq->entries);
}

static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
		      void *mb_buf)
{
	u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
	struct hns_roce_eq_context *eqc;
	u64 bt_ba = 0;
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	init_eq_config(hr_dev, eq);

	/* If not multi-hop, the EQE buffer uses only one chunk */
	count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
				  &bt_ba);
	if (count < 1) {
		dev_err(hr_dev->dev, "failed to find EQE mtr\n");
		return -ENOBUFS;
	}

	hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
	hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
	hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
	hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
	hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
	hr_reg_write(eqc, EQC_EQN, eq->eqn);
	hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
	hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
	hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev,
				 "eq_period(%u) reached the upper limit, adjusted to 65.\n",
				 eq->eq_period);
			eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
		}
		eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
	}

	hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
	hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
	hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
	hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
	hr_reg_write(eqc, EQC_SHIFT, eq->shift);
	hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
	hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
	hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
	hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
	hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);

	return 0;
}
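
/*
 * Allocate the EQE ring through the common MTR helpers: a single
 * region of entries * eqe_size bytes, addressed with the EQE hop
 * number from the device caps (0 for a single-chunk buffer).
 */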
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
		eq->hop_num = 0;
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
				  0);
	if (err)
		dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);

	return err;
}

static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq, u8 eq_cmd)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = alloc_eq_buf(hr_dev, eq);
	if (ret)
		goto free_cmd_mbox;

	ret = config_eqc(hr_dev, eq, mailbox->buf);
	if (ret)
		goto err_cmd_mbox;

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn);
	if (ret) {
		dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	free_eq_buf(hr_dev, eq);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq names are laid out as: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);

	/* CEQs sit right after the abnormal vectors in eq_table, but their
	 * names follow the AEQ names in irq_names[], hence the j + aeq_num
	 * and j - comp_num index adjustments below.
	 */
	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);
		else if (j < (other_num + comp_num))
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
		else
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
		if (ret) {
			dev_err(hr_dev->dev, "request irq error!\n");
			goto err_request_failed;
		}
	}

	return 0;

err_request_failed:
	for (j -= 1; j >= 0; j--)
		if (j < other_num)
			free_irq(hr_dev->irq[j], hr_dev);
		else
			free_irq(eq_table->eq[j - other_num].irq,
				 &eq_table->eq[j - other_num]);

err_kzalloc_failed:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);

	return ret;
}
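
/* Release every requested IRQ and the names allocated for them. */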
static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
{
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	for (i = 0; i < eq_num; i++)
		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);

	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);
}

static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	int other_num;
	int comp_num;
	int aeq_num;
	int irq_num;
	int eq_num;
	u8 eq_cmd;
	int ret;
	int i;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* create eq */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		if (i < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = hr_dev->caps.ceqe_size;
			eq->irq = hr_dev->irq[i + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = hr_dev->caps.aeqe_size;
			eq->irq = hr_dev->irq[i - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "failed to create eq.\n");
			goto err_create_eq_fail;
		}
	}

	INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);

	hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
	if (!hr_dev->irq_workq) {
		dev_err(dev, "failed to create irq workqueue.\n");
		ret = -ENOMEM;
		goto err_create_eq_fail;
	}

	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
				     other_num);
	if (ret) {
		dev_err(dev, "failed to request irq.\n");
		goto err_request_irq_fail;
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	destroy_workqueue(hr_dev->irq_workq);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		free_eq_buf(hr_dev, &eq_table->eq[i]);
	kfree(eq_table->eq);

	return ret;
}
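
/*
 * Tear down in the reverse order of hns_roce_v2_init_eq_table(): mask
 * interrupts, release IRQs and the work queue, then destroy each EQ
 * context and free its buffer.
 */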
static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	__hns_roce_free_irq(hr_dev);
	destroy_workqueue(hr_dev->irq_workq);

	for (i = 0; i < eq_num; i++) {
		hns_roce_v2_destroy_eqc(hr_dev, i);

		free_eq_buf(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eq);
}

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = v2_post_mbox,
	.poll_mbox_done = v2_poll_mbox_done,
	.chk_mbox_avail = v2_chk_mbox_is_avail,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.dereg_mr = hns_roce_v2_dereg_mr,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.query_cqc = hns_roce_v2_query_cqc,
	.query_qpc = hns_roce_v2_query_qpc,
	.query_mpt = hns_roce_v2_query_mpt,
	.query_hw_counter = hns_roce_hw_v2_query_counter,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
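
/*
 * Populate the RoCE device from the hnae3 handle exported by the NIC
 * driver: PCI identity, register/memory bases, netdev, interrupt
 * vectors and the initial reset counter.
 */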
static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				   struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	const struct pci_device_id *id;
	int i;

	hr_dev->pci_dev = handle->pdev;
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	hr_dev->is_vf = id->driver_data;
	hr_dev->dev = &handle->pdev->dev;
	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->mem_base = handle->rinfo.roce_mem_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < handle->rinfo.num_vectors; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;
}

static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hns_roce_hw_v2_get_cfg(hr_dev, handle);

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_roce_init;
	}

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		ret = free_mr_init(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev, "failed to init free mr!\n");
			goto error_failed_free_mr_init;
		}
	}

	handle->priv = hr_dev;

	return 0;

error_failed_free_mr_init:
	hns_roce_exit(hr_dev);

error_failed_roce_init:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
	hns_roce_handle_device_err(hr_dev);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		free_mr_exit(hr_dev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}
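
/*
 * hnae3 client entry point. Initialization is refused with -EBUSY
 * while the ae device is resetting, and probes for unsupported
 * device/revision combinations return success without creating an
 * instance.
 */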
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;

	/* RoCE VFs are not supported on HIP08, skip them */
	if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
		return 0;

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy in resetting state.\n"
		     "Please retry later.\n");

	return -EBUSY;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

	__hns_roce_hw_v2_uninit_instance(handle, reset);

	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->active = false;
	hr_dev->dis_db = true;
	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

	return 0;
}

static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
		 * engine is reinitialized in the reset notify callback. If
		 * the reinit fails, inform the NIC driver by clearing
		 * handle->priv.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "reset done, RoCE client reinit finished.\n");
	}

	return ret;
}
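
/*
 * Wait HNS_ROCE_V2_HW_RST_UNINT_DELAY before tearing the instance
 * down, giving outstanding hardware accesses a chance to drain during
 * the reset.
 */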
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}

static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_DOWN_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_down(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_init(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
		break;
	default:
		break;
	}

	return ret;
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");