/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

enum {
	CMD_RST_PRC_OTHERS,
	CMD_RST_PRC_SUCCESS,
	CMD_RST_PRC_EBUSY,
};

enum ecc_resource_type {
	ECC_RESOURCE_QPC,
	ECC_RESOURCE_CQC,
	ECC_RESOURCE_MPT,
	ECC_RESOURCE_SRQC,
	ECC_RESOURCE_GMV,
	ECC_RESOURCE_QPC_TIMER,
	ECC_RESOURCE_CQC_TIMER,
	ECC_RESOURCE_SCCC,
	ECC_RESOURCE_COUNT,
};

static const struct {
	const char *name;
	u8 read_bt0_op;
	u8 write_bt0_op;
} fmea_ram_res[] = {
	{ "ECC_RESOURCE_QPC",
	  HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
	{ "ECC_RESOURCE_CQC",
	  HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
	{ "ECC_RESOURCE_MPT",
	  HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
	{ "ECC_RESOURCE_SRQC",
	  HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
	/* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
	{ "ECC_RESOURCE_GMV",
	  0, 0 },
	{ "ECC_RESOURCE_QPC_TIMER",
	  HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
	{ "ECC_RESOURCE_CQC_TIMER",
	  HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
	{ "ECC_RESOURCE_SCCC",
	  HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
};

static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
				   struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The real values of the hns wr opcodes start from 0. To distinguish
 * initialized map entries from uninitialized ones, we add 1 to the real
 * value when defining the mapping, so that a mapped value greater than 0
 * identifies a valid entry.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
	[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND, SEND),
	HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ, RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
	HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR, FAST_REG_PMR),
};

static u32 to_hr_opcode(u32 ib_opcode)
{
	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
					     HNS_ROCE_V2_WQE_OP_MASK;
}
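/*
 * Illustrative example (not driver logic): with the "+1" scheme above,
 * hns_roce_op_code[IB_WR_SEND] holds 1 + HNS_ROCE_V2_WQE_OP_SEND, so
 * to_hr_opcode(IB_WR_SEND) returns HNS_ROCE_V2_WQE_OP_SEND. An IB opcode
 * that is not listed (e.g. IB_WR_LSO) leaves its array slot at 0, and
 * to_hr_opcode() maps it to HNS_ROCE_V2_WQE_OP_MASK, which the callers
 * treat as "unsupported".
 */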
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_wqe_frmr_seg *fseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
	u64 pbl_ba;

	/* use ib_access_flags */
	hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
	hr_reg_write_bool(fseg, FRMR_ATOMIC,
			  wr->access & IB_ACCESS_REMOTE_ATOMIC);
	hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
	hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
	hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);

	/* Data structure reuse may lead to confusion */
	pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
	rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
	rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
	hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
	hr_reg_clear(fseg, FRMR_BLK_MODE);
}

static void set_atomic_seg(const struct ib_send_wr *wr,
			   struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			   unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_wqe_atomic_seg *aseg =
		(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

	set_data_seg_v2(dseg, wr->sg_list);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
	} else {
		aseg->fetchadd_swap_data =
			cpu_to_le64(atomic_wr(wr)->compare_add);
		aseg->cmp_data = 0;
	}

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
}
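/*
 * A sketch of the atomic WQE layout assumed by set_atomic_seg() above
 * (illustration only): the base RC send WQE is followed immediately by
 * one data segment describing the local buffer that receives the old
 * value, and then by the atomic segment carrying the operands:
 *
 *	+--------------------------+  (void *)rc_sq_wqe
 *	| hns_roce_v2_rc_send_wqe  |
 *	+--------------------------+  + sizeof(rc_send_wqe)
 *	| wqe_data_seg (local sge) |
 *	+--------------------------+  + sizeof(wqe_data_seg)
 *	| atomic_seg (swap/cmp or  |
 *	|  add operand)            |
 *	+--------------------------+
 */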
static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
				 const struct ib_send_wr *wr,
				 unsigned int *sge_idx, u32 msg_len)
{
	struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
	unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
	unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
	unsigned int left_len_in_pg;
	unsigned int idx = *sge_idx;
	unsigned int i = 0;
	unsigned int len;
	void *addr;
	void *dseg;

	if (msg_len > ext_sge_sz) {
		ibdev_err(ibdev,
			  "not enough extended sge space for inline data.\n");
		return -EINVAL;
	}

	dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
	left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
	len = wr->sg_list[0].length;
	addr = (void *)(unsigned long)(wr->sg_list[0].addr);

	/* When copying data into the extended sge space, the length left in
	 * the current page may not be enough for the user's sge, so the data
	 * may have to be split into several parts: one in the current page
	 * and the others in the subsequent pages.
	 */
	while (1) {
		if (len <= left_len_in_pg) {
			memcpy(dseg, addr, len);

			idx += len / dseg_len;

			i++;
			if (i >= wr->num_sge)
				break;

			left_len_in_pg -= len;
			len = wr->sg_list[i].length;
			addr = (void *)(unsigned long)(wr->sg_list[i].addr);
			dseg += len;
		} else {
			memcpy(dseg, addr, left_len_in_pg);

			len -= left_len_in_pg;
			addr += left_len_in_pg;
			idx += left_len_in_pg / dseg_len;
			dseg = hns_roce_get_extend_sge(qp,
						idx & (qp->sge.sge_cnt - 1));
			left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
		}
	}

	*sge_idx = idx;

	return 0;
}

static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
			   unsigned int *sge_ind, unsigned int cnt)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	unsigned int idx = *sge_ind;

	while (cnt > 0) {
		dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
		if (likely(sge->length)) {
			set_data_seg_v2(dseg, sge);
			idx++;
			cnt--;
		}
		sge++;
	}

	*sge_ind = idx;
}

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	int mtu = ib_mtu_enum_to_int(qp->path_mtu);

	if (len > qp->max_inline_data || len > mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
			  len, qp->max_inline_data, mtu);
		return false;
	}

	return true;
}

static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
		      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
		      unsigned int *sge_idx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int curr_idx = *sge_idx;
	void *dseg = rc_sq_wqe;
	unsigned int i;
	int ret;

	if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
		ibdev_err(ibdev, "invalid inline parameters!\n");
		return -EINVAL;
	}

	if (!check_inl_data_len(qp, msg_len))
		return -EINVAL;

	dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

	if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
		hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dseg, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			dseg += wr->sg_list[i].length;
		}
	} else {
		hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);

		ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
		if (ret)
			return ret;

		hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
	}

	*sge_idx = curr_idx;

	return 0;
}
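/*
 * Rough decision tree for RC inline sends, as implemented above (sketch,
 * not normative): payloads no larger than HNS_ROCE_V2_MAX_RC_INL_INN_SZ
 * are copied directly behind the base WQE ("inner" inline, INL_TYPE
 * cleared); larger payloads, still bounded by max_inline_data and the
 * path MTU via check_inl_data_len(), are copied into the extended SGE
 * area instead ("extended" inline, INL_TYPE set), and the number of
 * extended SGE slots consumed is reported back through SGE_NUM.
 */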
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     unsigned int *sge_ind,
			     unsigned int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
		     (*sge_ind) & (qp->sge.sge_cnt - 1));

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
		     !!(wr->send_flags & IB_SEND_INLINE));
	if (wr->send_flags & IB_SEND_INLINE)
		return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

	if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
			}
		}
	} else {
		for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
			if (likely(wr->sg_list[i].length)) {
				set_data_seg_v2(dseg, wr->sg_list + i);
				dseg++;
				j++;
			}
		}

		set_extend_sge(qp, wr->sg_list + i, sge_ind,
			       valid_num_sge - HNS_ROCE_SGE_IN_WQE);
	}

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);

	return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "not supported QP(0x%x) type!\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
		   hr_qp->state == IB_QPS_INIT ||
		   hr_qp->state == IB_QPS_RTR)) {
		ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
			  hr_qp->state);
		return -EINVAL;
	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
			  hr_dev->state);
		return -EIO;
	}

	return 0;
}

static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
				    unsigned int *sge_len)
{
	unsigned int valid_num = 0;
	unsigned int len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		if (likely(wr->sg_list[i].length)) {
			len += wr->sg_list[i].length;
			valid_num++;
		}
	}

	*sge_len = len;
	return valid_num;
}

static __le32 get_immtdata(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
	default:
		return 0;
	}
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;

	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
		return -EINVAL;

	ud_sq_wqe->immtdata = get_immtdata(wr);

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));

	return 0;
}
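/*
 * Byte-order note on get_immtdata() (illustration only): ib_send_wr
 * carries immediate data in network byte order (__be32), while the
 * hardware WQE expects little endian. For imm_data = htonl(0x11223344),
 * be32_to_cpu() recovers 0x11223344 on any host and cpu_to_le32() lays
 * it out as 44 33 22 11 in the WQE, so the value survives the
 * host-order round trip regardless of CPU endianness.
 */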
static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
		      struct hns_roce_ah *ah)
{
	struct ib_device *ib_dev = ah->ibah.device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);

	if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
		return -EINVAL;

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);

	ud_sq_wqe->sgid_index = ah->av.gid_index;

	memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
	memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);

	return 0;
}

static inline int set_ud_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	ret = set_ud_opcode(ud_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
		     !!(wr->send_flags & IB_SEND_SIGNALED));
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
		     !!(wr->send_flags & IB_SEND_SOLICITED));

	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
		     curr_idx & (qp->sge.sge_cnt - 1));

	ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			  qp->qkey : ud_wr(wr)->remote_qkey);
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);

	ret = fill_ud_av(ud_sq_wqe, ah);
	if (ret)
		return ret;

	qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;

	set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);

	return 0;
}
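/*
 * Note on the qkey selection above (per the IB specification, stated
 * here as an illustration): a remote_qkey with the most significant bit
 * set (0x80000000) is a "controlled" Q_Key, and the sender must
 * substitute the Q_Key programmed into its own QP. For example, with
 * remote_qkey = 0x80010203 the WQE carries qp->qkey, while with
 * remote_qkey = 0x00010203 it carries the caller's value unchanged.
 */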
static int set_rc_opcode(struct hns_roce_dev *hr_dev,
			 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 const struct ib_send_wr *wr)
{
	u32 ib_op = wr->opcode;
	int ret = 0;

	rc_sq_wqe->immtdata = get_immtdata(wr);

	switch (ib_op) {
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
		break;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
		break;
	case IB_WR_REG_MR:
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			set_frmr_seg(rc_sq_wqe, reg_wr(wr));
		else
			ret = -EOPNOTSUPP;
		break;
	case IB_WR_LOCAL_INV:
		hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_SO);
		fallthrough;
	case IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	default:
		ret = -EINVAL;
	}

	if (unlikely(ret))
		return ret;

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));

	return ret;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	unsigned int valid_num_sge;
	u32 msg_len = 0;
	int ret;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
	if (WARN_ON(ret))
		return ret;

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE,
		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
	else if (wr->opcode != IB_WR_REG_MR)
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);

	/*
	 * The pipeline can sequentially post all valid WQEs into WQ buffer,
	 * including new WQEs waiting for the doorbell to update the PI again.
	 * Therefore, the owner bit of WQE MUST be updated after all fields
	 * and extSGEs have been written into DDR instead of cache.
	 */
	if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
		dma_wmb();

	*sge_idx = curr_idx;
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);

	return ret;
}
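/*
 * Doorbell sketch (illustrative, based on the two helpers below): when
 * the QP is healthy, ringing the SQ doorbell writes a 64-bit record
 * containing the doorbell QPN, the HNS_ROCE_V2_SQ_DB command, the new
 * producer index (sq.head) and the service level; when the QP has moved
 * to the error state, the doorbell is skipped and a flush CQE is
 * generated instead so that outstanding WRs complete with an error.
 */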
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
	} else {
		struct hns_roce_v2_db sq_db = {};

		hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
		hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
		hr_reg_write(&sq_db, DB_PI, qp->sq.head);
		hr_reg_write(&sq_db, DB_SL, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
	}
}

static inline void update_rq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
	} else {
		if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
			*qp->rdb.db_record =
					qp->rq.head & V2_DB_PRODUCER_IDX_M;
		} else {
			struct hns_roce_v2_db rq_db = {};

			hr_reg_write(&rq_db, DB_TAG, qp->qpn);
			hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
			hr_reg_write(&rq_db, DB_PI, qp->rq.head);

			hns_roce_write64(hr_dev, (__le32 *)&rq_db,
					 qp->rq.db_reg);
		}
	}
}

static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
			      u64 __iomem *dest)
{
#define HNS_ROCE_WRITE_TIMES 8
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	int i;

	if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
		for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
			writeq_relaxed(*(val + i), dest + i);
}

static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
		       void *wqe)
{
#define HNS_ROCE_SL_SHIFT 2
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

	/* All kinds of DirectWQE have the same header field layout */
	hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
		     qp->sl >> HNS_ROCE_SL_SHIFT);
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);

	hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}
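/*
 * Worked example for the owner-bit computation in the post-send path
 * below (illustration only): with qp->sq.wqe_cnt = 64,
 * ilog2(wqe_cnt) = 6, so the owner bit is the complement of bit 6 of
 * the absolute head index. After field masking, heads 0..63 (first
 * pass over the ring) give an owner field of 1, heads 64..127 give 0,
 * and so on, toggling on every wrap; this is what lets the hardware
 * tell freshly written WQEs from stale ones left over in the ring.
 */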
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	unsigned long flags = 0;
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	void *wqe = NULL;
	u32 nreq;
	int ret;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
				  wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* WQEs are built differently according to the QP type */
		if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		if (unlikely(ret)) {
			*bad_wr = wr;
			goto out;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;

		if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
			write_dwqe(hr_dev, qp, wqe);
		else
			update_sq_db(hr_dev, qp);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

static int check_recv_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	}

	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
		return -EIO;

	if (hr_qp->state == IB_QPS_RESET)
		return -EINVAL;

	return 0;
}

static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
				 u32 max_sge, bool rsv)
{
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	u32 i, cnt;

	for (i = 0, cnt = 0; i < wr->num_sge; i++) {
		/* Skip zero-length sge */
		if (!wr->sg_list[i].length)
			continue;
		set_data_seg_v2(dseg + cnt, wr->sg_list + i);
		cnt++;
	}

	/* Fill a reserved sge to make hw stop reading remaining segments */
	if (rsv) {
		dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
		dseg[cnt].addr = 0;
		dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
	} else {
		/* Clear remaining segments to make ROCEE ignore sges */
		if (cnt < max_sge)
			memset(dseg + cnt, 0,
			       (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
	}
}

static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
			u32 wqe_idx, u32 max_sge)
{
	struct hns_roce_rinl_sge *sge_list;
	void *wqe = NULL;
	u32 i;

	wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
	fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);

	/* rq supports inline data */
	if (hr_qp->rq_inl_buf.wqe_cnt) {
		sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
		hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
		for (i = 0; i < wr->num_sge; i++) {
			sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
			sge_list[i].len = wr->sg_list[i].length;
		}
	}
}
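/*
 * Sketch of the receive SGE termination used above (illustrative): a
 * queue created with rsv_sge = 1 reserves one trailing slot, so a WR
 * with two valid SGEs is laid out as
 *
 *	dseg[0] = user sge 0
 *	dseg[1] = user sge 1
 *	dseg[2] = { lkey = HNS_ROCE_INVALID_LKEY, addr = 0,
 *		    len = HNS_ROCE_INVALID_SGE_LENGTH }   <- stop marker
 *
 * and the hardware stops scanning at the marker. Without a reserved
 * slot, the unused tail is simply zeroed and ROCEE ignores it.
 */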
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 wqe_idx, nreq, max_sge;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	ret = check_recv_valid(hr_dev, hr_qp);
	if (unlikely(ret)) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
						  hr_qp->ibqp.recv_cq))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > max_sge)) {
			ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
				  wr->num_sge, max_sge);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
		fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;

		update_rq_db(hr_dev, hr_qp);
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
	return hns_roce_buf_offset(idx_que->mtr.kmem,
				   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->idx_que.tail++;

	spin_unlock(&srq->lock);
}

static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	return idx_que->head - idx_que->tail >= srq->wqe_cnt;
}

static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
				const struct ib_recv_wr *wr)
{
	struct ib_device *ib_dev = srq->ibsrq.device;

	if (unlikely(wr->num_sge > max_sge)) {
		ibdev_err(ib_dev,
			  "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
			  wr->num_sge, max_sge);
		return -EINVAL;
	}

	if (unlikely(hns_roce_srqwq_overflow(srq))) {
		ibdev_err(ib_dev,
			  "failed to check srqwq status, srqwq is full.\n");
		return -ENOMEM;
	}

	return 0;
}

static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	u32 pos;

	pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
	if (unlikely(pos == srq->wqe_cnt))
		return -ENOSPC;

	bitmap_set(idx_que->bitmap, pos, 1);
	*wqe_idx = pos;
	return 0;
}

static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	unsigned int head;
	__le32 *buf;

	head = idx_que->head & (srq->wqe_cnt - 1);

	buf = get_idx_buf(idx_que, head);
	*buf = cpu_to_le32(wqe_idx);

	idx_que->head++;
}

static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
{
	hr_reg_write(db, DB_TAG, srq->srqn);
	hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
	hr_reg_write(db, DB_PI, srq->idx_que.head);
}
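/*
 * How the SRQ index queue works, by example (illustration only): WQE
 * buffers are allocated out of order via the bitmap, but the hardware
 * consumes them in the order their indexes are pushed into the index
 * ring. Posting three WRs might allocate bitmap slots 0, 1 and 2 and
 * push [0, 1, 2] at ring positions head..head+2; if the WR for slot 1
 * completes first, hns_roce_free_srq_wqe() clears bit 1 so that slot
 * can be reused immediately, while head - tail still bounds the number
 * of outstanding WQEs for the overflow check.
 */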
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	u32 max_sge;
	u32 wqe_idx;
	void *wqe;
	u32 nreq;

	spin_lock_irqsave(&srq->lock, flags);

	max_sge = srq->max_gs - srq->rsv_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ret = check_post_srq_valid(srq, max_sge, wr);
		if (ret) {
			*bad_wr = wr;
			break;
		}

		ret = get_srq_wqe_idx(srq, &wqe_idx);
		if (unlikely(ret)) {
			*bad_wr = wr;
			break;
		}

		wqe = get_srq_wqe_buf(srq, wqe_idx);
		fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
		fill_wqe_idx(srq, wqe_idx);
		srq->wrid[wqe_idx] = wr->wr_id;
	}

	if (likely(nreq)) {
		update_srq_db(&srq_db, srq);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}

static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can rollback the operation like
	 * notifying hardware to free resources, HNAE3_INIT_CLIENT related
	 * process will exit with error to notify NIC driver to reschedule soft
	 * reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
#define HW_RESET_TIMEOUT_US 1000000
#define HW_RESET_SLEEP_US 1000

	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long val;
	int ret;

	/* When hardware reset is detected, we should stop sending mailbox&cmq&
	 * doorbell to hardware. If now in .init_instance() function, we should
	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
	 * related process can rollback the operation like notifying hardware
	 * to free resources, HNAE3_INIT_CLIENT related process will exit with
	 * error to notify NIC driver to reschedule soft reset process once
	 * again.
	 */
	hr_dev->dis_db = true;

	ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
				val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
				HW_RESET_TIMEOUT_US, false, handle);
	if (!ret)
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}
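/*
 * Shape of the read_poll_timeout() call above (informational): the
 * macro from <linux/iopoll.h> repeatedly evaluates
 * ops->ae_dev_reset_cnt(handle) into 'val', sleeping HW_RESET_SLEEP_US
 * between attempts, until either the condition
 * 'val > hr_dev->reset_cnt' holds (returns 0) or HW_RESET_TIMEOUT_US
 * elapses (returns -ETIMEDOUT). Here that means "wait up to one second
 * for the reset counter to advance, i.e. for the hardware reset to
 * complete".
 */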
static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}

static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
				    struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage; /* the current instance stage */
	unsigned long reset_stage; /* the current reset stage */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables from NIC driver are described
	 * as below:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether hardware device is resetting now.
	 * sw_resetting -- Whether NIC's software reset process is running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);

	hw_resetting = ops->get_cmdq_stat(handle);
	if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);

	sw_resetting = ops->ae_dev_resetting(handle);
	if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return CMD_RST_PRC_OTHERS;
}

static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
		return true;

	if (ops->get_hw_reset_stat(handle))
		return true;

	if (ops->ae_dev_resetting(handle))
		return true;

	return false;
}

static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	u32 status;

	if (hr_dev->is_reset)
		status = CMD_RST_PRC_SUCCESS;
	else
		status = check_aedev_reset_status(hr_dev, priv->handle);

	*busy = (status == CMD_RST_PRC_EBUSY);

	return status == CMD_RST_PRC_OTHERS;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = dma_alloc_coherent(hr_dev->dev, size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_free_coherent(hr_dev->dev,
			  ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			  ring->desc, ring->desc_dma_addr);

	ring->desc_dma_addr = 0;
}

static int init_csq(struct hns_roce_dev *hr_dev,
		    struct hns_roce_v2_cmq_ring *csq)
{
	dma_addr_t dma;
	int ret;

	csq->desc_num = CMD_CSQ_DESC_NUM;
	spin_lock_init(&csq->lock);
	csq->flag = TYPE_CSQ;
	csq->head = 0;

	ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
	if (ret)
		return ret;

	dma = csq->desc_dma_addr;
	roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
	roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
	roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
		   (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);

	/* Make sure to write CI first and then PI */
	roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
	roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);

	return 0;
}
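/*
 * CSQ producer/consumer model used below (sketch): the driver owns the
 * producer index (PI, written to ROCEE_TX_CMQ_PI_REG after queuing
 * descriptors) and the firmware owns the consumer index (CI, read back
 * from ROCEE_TX_CMQ_CI_REG). The queue is drained when CI catches up
 * with the driver's cached head; e.g. posting 2 descriptors at head 5
 * moves PI to 7, and hns_roce_cmq_csq_done() reports completion once
 * the hardware advances CI to 7 as well.
 */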
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int ret;

	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	ret = init_csq(hr_dev, &priv->cmq.csq);
	if (ret)
		dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);

	return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	return tail == priv->cmq.csq.head;
}

static void update_cmdq_status(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;

	if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
	    handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
		hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
}

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	u32 timeout = 0;
	u16 desc_ret;
	u32 tail;
	int ret;
	int i;

	spin_lock_bh(&csq->lock);

	tail = csq->head;

	for (i = 0; i < num; i++) {
		csq->desc[csq->head++] = desc[i];
		if (csq->head == csq->desc_num)
			csq->head = 0;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);

	do {
		if (hns_roce_cmq_csq_done(hr_dev))
			break;
		udelay(1);
	} while (++timeout < priv->cmq.tx_timeout);

	if (hns_roce_cmq_csq_done(hr_dev)) {
		ret = 0;
		for (i = 0; i < num; i++) {
			/* check the result of hardware write back */
			desc[i] = csq->desc[tail++];
			if (tail == csq->desc_num)
				tail = 0;

			desc_ret = le16_to_cpu(desc[i].retval);
			if (likely(desc_ret == CMD_EXEC_SUCCESS))
				continue;

			dev_err_ratelimited(hr_dev->dev,
					    "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
					    desc->opcode, desc_ret);
			ret = -EIO;
		}
	} else {
		/* FW/HW reset or incorrect number of desc */
		tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
		dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
			 csq->head, tail);
		csq->head = tail;

		update_cmdq_status(hr_dev);

		ret = -EAGAIN;
	}

	spin_unlock_bh(&csq->lock);

	return ret;
}
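/*
 * Typical command round-trip through the wrapper below (sketch; the
 * opcode is just an example that appears elsewhere in this file):
 *
 *	struct hns_roce_cmq_desc desc;
 *
 *	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
 *	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
 *	if (ret)
 *		return ret;
 *	(on success, desc.data now holds the firmware's reply)
 */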
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	bool busy;
	int ret;

	if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
		return -EIO;

	if (!v2_chk_mbox_is_avail(hr_dev, &busy))
		return busy ? -EBUSY : 0;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		if (!v2_chk_mbox_is_avail(hr_dev, &busy))
			return busy ? -EBUSY : 0;
	}

	return ret;
}

static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
			       dma_addr_t base_addr, u8 cmd, unsigned long tag)
{
	struct hns_roce_cmd_mailbox *mbox;
	int ret;

	mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);

	ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
	hns_roce_free_cmd_mailbox(hr_dev, mbox);
	return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}

static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "Func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (!ops->get_hw_reset_stat(handle)) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "Func clear success after reset.\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "Func clear failed.\n");
}

static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long end;

	hr_dev->dis_db = true;

	dev_warn(hr_dev->dev,
		 "Func clear is pending, device in resetting state.\n");
	end = HNS_ROCE_V2_HW_RST_TIMEOUT;
	while (end) {
		if (ops->ae_dev_reset_cnt(handle) !=
		    hr_dev->reset_cnt) {
			hr_dev->is_reset = true;
			dev_info(hr_dev->dev,
				 "Func clear success after sw reset\n");
			return;
		}
		msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
		end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
	}

	dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
}
static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
				       int flag)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
		return;
	}

	if (ops->get_hw_reset_stat(handle)) {
		func_clr_hw_resetting_state(hr_dev, handle);
		return;
	}

	if (ops->ae_dev_resetting(handle) &&
	    handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
		func_clr_sw_resetting_state(hr_dev, handle);
		return;
	}

	if (retval && !flag)
		dev_warn(hr_dev->dev,
			 "Func clear read failed, ret = %d.\n", retval);

	dev_warn(hr_dev->dev, "Func clear failed.\n");
}

static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (check_device_is_in_reset(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;
	resp->rst_funcid_en = cpu_to_le32(vf_id);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (check_device_is_in_reset(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		resp->rst_funcid_en = cpu_to_le32(vf_id);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
			if (vf_id == 0)
				hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
}

static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
	enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *req_a;

	req_a = (struct hns_roce_cmq_req *)desc[0].data;
	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
	hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	int ret;
	int i;

	if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
		return;

	for (i = hr_dev->func_num - 1; i >= 0; i--) {
		__hns_roce_function_clear(hr_dev, i);

		if (i == 0)
			continue;

		ret = hns_roce_free_vf_resource(hr_dev, i);
		if (ret)
			ibdev_err(&hr_dev->ib_dev,
				  "failed to free vf resource, vf_id = %d, ret = %d.\n",
				  i, ret);
	}
}

static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
				      false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		ibdev_err(&hr_dev->ib_dev,
			  "failed to clear extended doorbell info, ret = %d.\n",
			  ret);

	return ret;
}
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}

static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	int ret;

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		hr_dev->func_num = 1;
		return 0;
	}

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
				      true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		hr_dev->func_num = 1;
		return ret;
	}

	hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
	hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);

	return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	u32 clock_cycles_of_1us;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
	else
		clock_cycles_of_1us = HNS_ROCE_1US_CFG;

	hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
	hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
	struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	enum hns_roce_opcode_type opcode;
	u32 func_num;
	int ret;

	if (is_vf) {
		opcode = HNS_ROCE_OPC_QUERY_VF_RES;
		func_num = 1;
	} else {
		opcode = HNS_ROCE_OPC_QUERY_PF_RES;
		func_num = hr_dev->func_num;
	}

	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
	caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
	caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
	caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
	caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
	caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
	caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
	caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;

	if (is_vf) {
		caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
		caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
					       func_num;
	} else {
		caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
		caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
					       func_num;
	}

	return 0;
}
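/*
 * Worked example for the per-function division above (illustration):
 * on a PF, the firmware reports totals for the whole physical port, so
 * with func_num = 4 and FUNC_RES_A_QPC_BT_NUM = 256 each function is
 * credited qpc_bt_num = 64 base-address-table entries. A VF queries
 * only its own share, hence func_num = 1 and no division takes place.
 */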
static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	u32 func_num, qp_num;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
	qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
	caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);

	qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
	caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);

	return 0;
}

static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
				      true);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
	caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);

	return 0;
}
"vf" : "pf"); 1755 } 1756 1757 return ret; 1758 } 1759 1760 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) 1761 { 1762 struct device *dev = hr_dev->dev; 1763 int ret; 1764 1765 ret = query_func_resource_caps(hr_dev, false); 1766 if (ret) 1767 return ret; 1768 1769 ret = load_pf_timer_res_caps(hr_dev); 1770 if (ret) 1771 dev_err(dev, "failed to load pf timer resource, ret = %d.\n", 1772 ret); 1773 1774 return ret; 1775 } 1776 1777 static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev) 1778 { 1779 return query_func_resource_caps(hr_dev, true); 1780 } 1781 1782 static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, 1783 u32 vf_id) 1784 { 1785 struct hns_roce_vf_switch *swt; 1786 struct hns_roce_cmq_desc desc; 1787 int ret; 1788 1789 swt = (struct hns_roce_vf_switch *)desc.data; 1790 hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); 1791 swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); 1792 hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id); 1793 ret = hns_roce_cmq_send(hr_dev, &desc, 1); 1794 if (ret) 1795 return ret; 1796 1797 desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN); 1798 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); 1799 hr_reg_enable(swt, VF_SWITCH_ALW_LPBK); 1800 hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK); 1801 hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD); 1802 1803 return hns_roce_cmq_send(hr_dev, &desc, 1); 1804 } 1805 1806 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev) 1807 { 1808 u32 vf_id; 1809 int ret; 1810 1811 for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) { 1812 ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id); 1813 if (ret) 1814 return ret; 1815 } 1816 return 0; 1817 } 1818 1819 static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id) 1820 { 1821 struct hns_roce_cmq_desc desc[2]; 1822 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data; 1823 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data; 1824 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES; 1825 struct hns_roce_caps *caps = &hr_dev->caps; 1826 1827 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false); 1828 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 1829 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false); 1830 1831 hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id); 1832 1833 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num); 1834 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num); 1835 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num); 1836 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num); 1837 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num); 1838 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num); 1839 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num); 1840 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num); 1841 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num); 1842 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num); 1843 hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num); 1844 hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num); 1845 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num); 1846 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num); 1847 1848 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { 1849 hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num); 1850 hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX, 1851 vf_id * caps->gmv_bt_num); 1852 } else { 1853 
static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
	struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
	enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
	desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);

	hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);

	hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
	hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
	hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
	hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
	hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
	hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
			     vf_id * caps->gmv_bt_num);
	} else {
		hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
			     vf_id * caps->sgid_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
		hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
			     vf_id * caps->smac_bt_num);
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);

	hr_reg_write(req, EXT_CFG_VF_ID, vf_id);

	hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
	hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
	hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
	hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	u32 func_num = max_t(u32, 1, hr_dev->func_num);
	u32 vf_id;
	int ret;

	for (vf_id = 0; vf_id < func_num; vf_id++) {
		ret = config_vf_hem_resource(hr_dev, vf_id);
		if (ret) {
			dev_err(hr_dev->dev,
				"failed to config vf-%u hem res, ret = %d.\n",
				vf_id, ret);
			return ret;
		}

		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
			ret = config_vf_ext_resource(hr_dev, vf_id);
			if (ret) {
				dev_err(hr_dev->dev,
					"failed to config vf-%u ext res, ret = %d.\n",
					vf_id, ret);
				return ret;
			}
		}
	}

	return 0;
}
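/*
 * Note on hns_roce_v2_set_bt() below (a sketch of the encoding, as
 * understood from this driver): the *_ba_pg_sz/*_buf_pg_sz caps are
 * page-size shifts relative to the base 4KB page, PG_SHIFT_OFFSET
 * compensates for hosts whose PAGE_SHIFT is not 12, and the *_HOPNUM
 * fields select how many levels of base-address tables the hardware
 * walks to reach a context entry (see calc_pg_sz() further down).
 */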
1954 return hns_roce_cmq_send(hr_dev, &desc, 1); 1955 } 1956 1957 /* Use default caps when hns_roce_query_pf_caps() fails or when initializing a VF profile */ 1958 static void set_default_caps(struct hns_roce_dev *hr_dev) 1959 { 1960 struct hns_roce_caps *caps = &hr_dev->caps; 1961 1962 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM; 1963 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM; 1964 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM; 1965 caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM; 1966 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; 1967 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM; 1968 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM; 1969 caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM; 1970 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM; 1971 1972 caps->num_uars = HNS_ROCE_V2_UAR_NUM; 1973 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM; 1974 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM; 1975 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM; 1976 caps->num_comp_vectors = 0; 1977 1978 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM; 1979 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM; 1980 caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM; 1981 caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM; 1982 1983 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA; 1984 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA; 1985 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ; 1986 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ; 1987 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ; 1988 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ; 1989 caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ; 1990 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ; 1991 caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ; 1992 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ; 1993 caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ; 1994 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; 1995 caps->reserved_lkey = 0; 1996 caps->reserved_pds = 0; 1997 caps->reserved_mrws = 1; 1998 caps->reserved_uars = 0; 1999 caps->reserved_cqs = 0; 2000 caps->reserved_srqs = 0; 2001 caps->reserved_qps = HNS_ROCE_V2_RSV_QPS; 2002 2003 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; 2004 caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; 2005 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; 2006 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; 2007 caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM; 2008 2009 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM; 2010 caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM; 2011 caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM; 2012 caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM; 2013 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM; 2014 caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM; 2015 caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM; 2016 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE; 2017 2018 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR | 2019 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 | 2020 HNS_ROCE_CAP_FLAG_CQ_RECORD_DB | 2021 HNS_ROCE_CAP_FLAG_QP_RECORD_DB; 2022 2023 caps->pkey_table_len[0] = 1; 2024 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM; 2025 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM; 2026 caps->local_ca_ack_delay = 0; 2027 caps->max_mtu = IB_MTU_4096; 2028 2029 caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR; 2030 caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE; 2031 2032 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW | 2033 HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR | 2034 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC; 2035 2036 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM; 2037 2038 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
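/* HIP09 and newer add the stash and direct WQE features and allow a larger SQ inline limit. */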
2039 caps->flags |= HNS_ROCE_CAP_FLAG_STASH | 2040 HNS_ROCE_CAP_FLAG_DIRECT_WQE; 2041 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE; 2042 } else { 2043 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE; 2044 2045 /* The following configurations are only valid for HIP08 */ 2046 caps->qpc_sz = HNS_ROCE_V2_QPC_SZ; 2047 caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ; 2048 caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE; 2049 } 2050 } 2051 2052 static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num, 2053 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type) 2054 { 2055 u64 obj_per_chunk; 2056 u64 bt_chunk_size = PAGE_SIZE; 2057 u64 buf_chunk_size = PAGE_SIZE; 2058 u64 obj_per_chunk_default = buf_chunk_size / obj_size; 2059 2060 *buf_page_size = 0; 2061 *bt_page_size = 0; 2062 2063 switch (hop_num) { 2064 case 3: 2065 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) * 2066 (bt_chunk_size / BA_BYTE_LEN) * 2067 (bt_chunk_size / BA_BYTE_LEN) * 2068 obj_per_chunk_default; 2069 break; 2070 case 2: 2071 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) * 2072 (bt_chunk_size / BA_BYTE_LEN) * 2073 obj_per_chunk_default; 2074 break; 2075 case 1: 2076 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) * 2077 obj_per_chunk_default; 2078 break; 2079 case HNS_ROCE_HOP_NUM_0: 2080 obj_per_chunk = ctx_bt_num * obj_per_chunk_default; 2081 break; 2082 default: 2083 pr_err("table %u does not support hop_num = %u!\n", hem_type, 2084 hop_num); 2085 return; 2086 } 2087 2088 if (hem_type >= HEM_TYPE_MTT) 2089 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk)); 2090 else 2091 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk)); 2092 } 2093 2094 static void set_hem_page_size(struct hns_roce_dev *hr_dev) 2095 { 2096 struct hns_roce_caps *caps = &hr_dev->caps; 2097 2098 /* EQ */ 2099 caps->eqe_ba_pg_sz = 0; 2100 caps->eqe_buf_pg_sz = 0; 2101 2102 /* Link Table */ 2103 caps->llm_buf_pg_sz = 0; 2104 2105 /* MR */ 2106 caps->mpt_ba_pg_sz = 0; 2107 caps->mpt_buf_pg_sz = 0; 2108 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K; 2109 caps->pbl_buf_pg_sz = 0; 2110 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num, 2111 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz, 2112 HEM_TYPE_MTPT); 2113 2114 /* QP */ 2115 caps->qpc_ba_pg_sz = 0; 2116 caps->qpc_buf_pg_sz = 0; 2117 caps->qpc_timer_ba_pg_sz = 0; 2118 caps->qpc_timer_buf_pg_sz = 0; 2119 caps->sccc_ba_pg_sz = 0; 2120 caps->sccc_buf_pg_sz = 0; 2121 caps->mtt_ba_pg_sz = 0; 2122 caps->mtt_buf_pg_sz = 0; 2123 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num, 2124 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz, 2125 HEM_TYPE_QPC); 2126 2127 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) 2128 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num, 2129 caps->sccc_bt_num, &caps->sccc_buf_pg_sz, 2130 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC); 2131 2132 /* CQ */ 2133 caps->cqc_ba_pg_sz = 0; 2134 caps->cqc_buf_pg_sz = 0; 2135 caps->cqc_timer_ba_pg_sz = 0; 2136 caps->cqc_timer_buf_pg_sz = 0; 2137 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K; 2138 caps->cqe_buf_pg_sz = 0; 2139 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num, 2140 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz, 2141 HEM_TYPE_CQC); 2142 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num, 2143 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE); 2144 2145 /* SRQ */ 2146 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) { 2147 caps->srqc_ba_pg_sz = 0; 2148 caps->srqc_buf_pg_sz = 0; 2149
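/* SRQ WQE and IDX tables also start from the default page size; calc_pg_sz() below derives the sizes actually needed. */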
caps->srqwqe_ba_pg_sz = 0; 2150 caps->srqwqe_buf_pg_sz = 0; 2151 caps->idx_ba_pg_sz = 0; 2152 caps->idx_buf_pg_sz = 0; 2153 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, 2154 caps->srqc_hop_num, caps->srqc_bt_num, 2155 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz, 2156 HEM_TYPE_SRQC); 2157 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz, 2158 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz, 2159 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE); 2160 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, 2161 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz, 2162 &caps->idx_ba_pg_sz, HEM_TYPE_IDX); 2163 } 2164 2165 /* GMV */ 2166 caps->gmv_ba_pg_sz = 0; 2167 caps->gmv_buf_pg_sz = 0; 2168 } 2169 2170 /* Apply all loaded caps before setting to hardware */ 2171 static void apply_func_caps(struct hns_roce_dev *hr_dev) 2172 { 2173 struct hns_roce_caps *caps = &hr_dev->caps; 2174 struct hns_roce_v2_priv *priv = hr_dev->priv; 2175 2176 /* The following configurations don't need to be obtained from firmware. */ 2177 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ; 2178 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; 2179 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; 2180 2181 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; 2182 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; 2183 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; 2184 2185 caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM; 2186 caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM; 2187 2188 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS; 2189 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS; 2190 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS; 2191 2192 if (!caps->num_comp_vectors) 2193 caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1, 2194 (u32)priv->handle->rinfo.num_vectors - 2); 2195 2196 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { 2197 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM; 2198 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE; 2199 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE; 2200 2201 /* The following configurations will be overwritten */ 2202 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ; 2203 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE; 2204 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ; 2205 2206 /* The following configurations are not obtained from firmware */ 2207 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ; 2208 2209 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0; 2210 caps->gid_table_len[0] = caps->gmv_bt_num * 2211 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz); 2212 2213 caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE / 2214 caps->gmv_entry_sz); 2215 } else { 2216 u32 func_num = max_t(u32, 1, hr_dev->func_num); 2217 2218 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM; 2219 caps->ceqe_size = HNS_ROCE_CEQE_SIZE; 2220 caps->aeqe_size = HNS_ROCE_AEQE_SIZE; 2221 caps->gid_table_len[0] /= func_num; 2222 } 2223 2224 if (hr_dev->is_vf) { 2225 caps->default_aeq_arm_st = 0x3; 2226 caps->default_ceq_arm_st = 0x3; 2227 caps->default_ceq_max_cnt = 0x1; 2228 caps->default_ceq_period = 0x10; 2229 caps->default_aeq_max_cnt = 0x1; 2230 caps->default_aeq_period = 0x10; 2231 } 2232 2233 set_hem_page_size(hr_dev); 2234 } 2235 2236 static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) 2237 { 2238 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM]; 2239 struct hns_roce_caps *caps = &hr_dev->caps; 2240 struct hns_roce_query_pf_caps_a *resp_a; 2241 struct hns_roce_query_pf_caps_b *resp_b; 2242 struct hns_roce_query_pf_caps_c *resp_c; 2243 struct hns_roce_query_pf_caps_d *resp_d; 2244 struct hns_roce_query_pf_caps_e *resp_e; 2245 int ctx_hop_num; 2246 int pbl_hop_num;
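/* ctx_hop_num and pbl_hop_num are read from resp_b below and then fanned out to all context and PBL tables. */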
2247 int ret; 2248 int i; 2249 2250 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) { 2251 hns_roce_cmq_setup_basic_desc(&desc[i], 2252 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM, 2253 true); 2254 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1)) 2255 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 2256 else 2257 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 2258 } 2259 2260 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM); 2261 if (ret) 2262 return ret; 2263 2264 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data; 2265 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data; 2266 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data; 2267 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data; 2268 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data; 2269 2270 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay; 2271 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg); 2272 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); 2273 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); 2274 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); 2275 caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg); 2276 caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer); 2277 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); 2278 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); 2279 caps->num_aeq_vectors = resp_a->num_aeq_vectors; 2280 caps->num_other_vectors = resp_a->num_other_vectors; 2281 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; 2282 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; 2283 caps->max_srq_desc_sz = resp_a->max_srq_desc_sz; 2284 caps->cqe_sz = resp_a->cqe_sz; 2285 2286 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; 2287 caps->irrl_entry_sz = resp_b->irrl_entry_sz; 2288 caps->trrl_entry_sz = resp_b->trrl_entry_sz; 2289 caps->cqc_entry_sz = resp_b->cqc_entry_sz; 2290 caps->srqc_entry_sz = resp_b->srqc_entry_sz; 2291 caps->idx_entry_sz = resp_b->idx_entry_sz; 2292 caps->sccc_sz = resp_b->sccc_sz; 2293 caps->max_mtu = resp_b->max_mtu; 2294 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz); 2295 caps->min_cqes = resp_b->min_cqes; 2296 caps->min_wqes = resp_b->min_wqes; 2297 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap); 2298 caps->pkey_table_len[0] = resp_b->pkey_table_len; 2299 caps->phy_num_uars = resp_b->phy_num_uars; 2300 ctx_hop_num = resp_b->ctx_hop_num; 2301 pbl_hop_num = resp_b->pbl_hop_num; 2302 2303 caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS); 2304 2305 caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS); 2306 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) << 2307 HNS_ROCE_CAP_FLAGS_EX_SHIFT; 2308 2309 caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS); 2310 caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID); 2311 caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH); 2312 caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS); 2313 caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS); 2314 caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD); 2315 caps->max_qp_dest_rdma = caps->max_qp_init_rdma; 2316 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); 2317 2318 caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS); 2319 caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE); 2320 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); 2321 caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); 2322 caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); 2323 caps->aeqe_depth = 1 << 
hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); 2324 caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST); 2325 caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST); 2326 caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); 2327 caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); 2328 caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); 2329 caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS); 2330 2331 caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS); 2332 caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT); 2333 caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS); 2334 caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS); 2335 caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS); 2336 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); 2337 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); 2338 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); 2339 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period); 2340 2341 caps->qpc_hop_num = ctx_hop_num; 2342 caps->sccc_hop_num = ctx_hop_num; 2343 caps->srqc_hop_num = ctx_hop_num; 2344 caps->cqc_hop_num = ctx_hop_num; 2345 caps->mpt_hop_num = ctx_hop_num; 2346 caps->mtt_hop_num = pbl_hop_num; 2347 caps->cqe_hop_num = pbl_hop_num; 2348 caps->srqwqe_hop_num = pbl_hop_num; 2349 caps->idx_hop_num = pbl_hop_num; 2350 caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM); 2351 caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM); 2352 caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM); 2353 2354 return 0; 2355 } 2356 2357 static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val) 2358 { 2359 struct hns_roce_cmq_desc desc; 2360 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data; 2361 2362 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE, 2363 false); 2364 2365 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type); 2366 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val); 2367 2368 return hns_roce_cmq_send(hr_dev, &desc, 1); 2369 } 2370 2371 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev) 2372 { 2373 struct hns_roce_caps *caps = &hr_dev->caps; 2374 int ret; 2375 2376 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) 2377 return 0; 2378 2379 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE, 2380 caps->qpc_sz); 2381 if (ret) { 2382 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret); 2383 return ret; 2384 } 2385 2386 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE, 2387 caps->sccc_sz); 2388 if (ret) 2389 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret); 2390 2391 return ret; 2392 } 2393 2394 static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev) 2395 { 2396 struct device *dev = hr_dev->dev; 2397 int ret; 2398 2399 hr_dev->func_num = 1; 2400 2401 set_default_caps(hr_dev); 2402 2403 ret = hns_roce_query_vf_resource(hr_dev); 2404 if (ret) { 2405 dev_err(dev, "failed to query VF resource, ret = %d.\n", ret); 2406 return ret; 2407 } 2408 2409 apply_func_caps(hr_dev); 2410 2411 ret = hns_roce_v2_set_bt(hr_dev); 2412 if (ret) 2413 dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret); 2414 2415 return ret; 2416 } 2417 2418 static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev) 2419 { 2420 struct device *dev = hr_dev->dev; 2421 int ret; 2422 2423 ret = hns_roce_query_func_info(hr_dev); 2424 if (ret) { 2425 dev_err(dev, 
"failed to query func info, ret = %d.\n", ret); 2426 return ret; 2427 } 2428 2429 ret = hns_roce_config_global_param(hr_dev); 2430 if (ret) { 2431 dev_err(dev, "failed to config global param, ret = %d.\n", ret); 2432 return ret; 2433 } 2434 2435 ret = hns_roce_set_vf_switch_param(hr_dev); 2436 if (ret) { 2437 dev_err(dev, "failed to set switch param, ret = %d.\n", ret); 2438 return ret; 2439 } 2440 2441 ret = hns_roce_query_pf_caps(hr_dev); 2442 if (ret) 2443 set_default_caps(hr_dev); 2444 2445 ret = hns_roce_query_pf_resource(hr_dev); 2446 if (ret) { 2447 dev_err(dev, "failed to query pf resource, ret = %d.\n", ret); 2448 return ret; 2449 } 2450 2451 apply_func_caps(hr_dev); 2452 2453 ret = hns_roce_alloc_vf_resource(hr_dev); 2454 if (ret) { 2455 dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret); 2456 return ret; 2457 } 2458 2459 ret = hns_roce_v2_set_bt(hr_dev); 2460 if (ret) { 2461 dev_err(dev, "failed to config BA table, ret = %d.\n", ret); 2462 return ret; 2463 } 2464 2465 /* Configure the size of QPC, SCCC, etc. */ 2466 return hns_roce_config_entry_size(hr_dev); 2467 } 2468 2469 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) 2470 { 2471 struct device *dev = hr_dev->dev; 2472 int ret; 2473 2474 ret = hns_roce_cmq_query_hw_info(hr_dev); 2475 if (ret) { 2476 dev_err(dev, "failed to query hardware info, ret = %d.\n", ret); 2477 return ret; 2478 } 2479 2480 ret = hns_roce_query_fw_ver(hr_dev); 2481 if (ret) { 2482 dev_err(dev, "failed to query firmware info, ret = %d.\n", ret); 2483 return ret; 2484 } 2485 2486 hr_dev->vendor_part_id = hr_dev->pci_dev->device; 2487 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid); 2488 2489 if (hr_dev->is_vf) 2490 return hns_roce_v2_vf_profile(hr_dev); 2491 else 2492 return hns_roce_v2_pf_profile(hr_dev); 2493 } 2494 2495 static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf) 2496 { 2497 u32 i, next_ptr, page_num; 2498 __le64 *entry = cfg_buf; 2499 dma_addr_t addr; 2500 u64 val; 2501 2502 page_num = data_buf->npages; 2503 for (i = 0; i < page_num; i++) { 2504 addr = hns_roce_buf_page(data_buf, i); 2505 if (i == (page_num - 1)) 2506 next_ptr = 0; 2507 else 2508 next_ptr = i + 1; 2509 2510 val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr); 2511 entry[i] = cpu_to_le64(val); 2512 } 2513 } 2514 2515 static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev, 2516 struct hns_roce_link_table *table) 2517 { 2518 struct hns_roce_cmq_desc desc[2]; 2519 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data; 2520 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data; 2521 struct hns_roce_buf *buf = table->buf; 2522 enum hns_roce_opcode_type opcode; 2523 dma_addr_t addr; 2524 2525 opcode = HNS_ROCE_OPC_CFG_EXT_LLM; 2526 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false); 2527 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 2528 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false); 2529 2530 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map)); 2531 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map)); 2532 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages); 2533 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift)); 2534 hr_reg_enable(r_a, CFG_LLM_A_INIT_EN); 2535 2536 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0)); 2537 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr)); 2538 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr)); 2539 hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1); 2540 hr_reg_write(r_a, 
CFG_LLM_A_HEAD_PTR, 0); 2541 2542 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1)); 2543 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr)); 2544 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr)); 2545 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1); 2546 2547 return hns_roce_cmq_send(hr_dev, desc, 2); 2548 } 2549 2550 static struct hns_roce_link_table * 2551 alloc_link_table_buf(struct hns_roce_dev *hr_dev) 2552 { 2553 struct hns_roce_v2_priv *priv = hr_dev->priv; 2554 struct hns_roce_link_table *link_tbl; 2555 u32 pg_shift, size, min_size; 2556 2557 link_tbl = &priv->ext_llm; 2558 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT; 2559 size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ; 2560 min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift; 2561 2562 /* Alloc data table */ 2563 size = max(size, min_size); 2564 link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0); 2565 if (IS_ERR(link_tbl->buf)) 2566 return ERR_PTR(-ENOMEM); 2567 2568 /* Alloc config table */ 2569 size = link_tbl->buf->npages * sizeof(u64); 2570 link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size, 2571 &link_tbl->table.map, 2572 GFP_KERNEL); 2573 if (!link_tbl->table.buf) { 2574 hns_roce_buf_free(hr_dev, link_tbl->buf); 2575 return ERR_PTR(-ENOMEM); 2576 } 2577 2578 return link_tbl; 2579 } 2580 2581 static void free_link_table_buf(struct hns_roce_dev *hr_dev, 2582 struct hns_roce_link_table *tbl) 2583 { 2584 if (tbl->buf) { 2585 u32 size = tbl->buf->npages * sizeof(u64); 2586 2587 dma_free_coherent(hr_dev->dev, size, tbl->table.buf, 2588 tbl->table.map); 2589 } 2590 2591 hns_roce_buf_free(hr_dev, tbl->buf); 2592 } 2593 2594 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev) 2595 { 2596 struct hns_roce_link_table *link_tbl; 2597 int ret; 2598 2599 link_tbl = alloc_link_table_buf(hr_dev); 2600 if (IS_ERR(link_tbl)) 2601 return -ENOMEM; 2602 2603 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) { 2604 ret = -EINVAL; 2605 goto err_alloc; 2606 } 2607 2608 config_llm_table(link_tbl->buf, link_tbl->table.buf); 2609 ret = set_llm_cfg_to_hw(hr_dev, link_tbl); 2610 if (ret) 2611 goto err_alloc; 2612 2613 return 0; 2614 2615 err_alloc: 2616 free_link_table_buf(hr_dev, link_tbl); 2617 return ret; 2618 } 2619 2620 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev) 2621 { 2622 struct hns_roce_v2_priv *priv = hr_dev->priv; 2623 2624 free_link_table_buf(hr_dev, &priv->ext_llm); 2625 } 2626 2627 static void free_dip_list(struct hns_roce_dev *hr_dev) 2628 { 2629 struct hns_roce_dip *hr_dip; 2630 struct hns_roce_dip *tmp; 2631 unsigned long flags; 2632 2633 spin_lock_irqsave(&hr_dev->dip_list_lock, flags); 2634 2635 list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) { 2636 list_del(&hr_dip->node); 2637 kfree(hr_dip); 2638 } 2639 2640 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); 2641 } 2642 2643 static void free_mr_exit(struct hns_roce_dev *hr_dev) 2644 { 2645 struct hns_roce_v2_priv *priv = hr_dev->priv; 2646 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2647 int ret; 2648 int i; 2649 2650 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { 2651 if (free_mr->rsv_qp[i]) { 2652 ret = ib_destroy_qp(free_mr->rsv_qp[i]); 2653 if (ret) 2654 ibdev_err(&hr_dev->ib_dev, 2655 "failed to destroy qp in free mr.\n"); 2656 2657 free_mr->rsv_qp[i] = NULL; 2658 } 2659 } 2660 2661 if (free_mr->rsv_cq) { 2662 ib_destroy_cq(free_mr->rsv_cq); 2663 free_mr->rsv_cq = NULL; 2664 } 2665 2666 if 
(free_mr->rsv_pd) { 2667 ib_dealloc_pd(free_mr->rsv_pd); 2668 free_mr->rsv_pd = NULL; 2669 } 2670 } 2671 2672 static int free_mr_alloc_res(struct hns_roce_dev *hr_dev) 2673 { 2674 struct hns_roce_v2_priv *priv = hr_dev->priv; 2675 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2676 struct ib_device *ibdev = &hr_dev->ib_dev; 2677 struct ib_cq_init_attr cq_init_attr = {}; 2678 struct ib_qp_init_attr qp_init_attr = {}; 2679 struct ib_pd *pd; 2680 struct ib_cq *cq; 2681 struct ib_qp *qp; 2682 int ret; 2683 int i; 2684 2685 pd = ib_alloc_pd(ibdev, 0); 2686 if (IS_ERR(pd)) { 2687 ibdev_err(ibdev, "failed to create pd for free mr.\n"); 2688 return PTR_ERR(pd); 2689 } 2690 free_mr->rsv_pd = pd; 2691 2692 cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM; 2693 cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr); 2694 if (IS_ERR(cq)) { 2695 ibdev_err(ibdev, "failed to create cq for free mr.\n"); 2696 ret = PTR_ERR(cq); 2697 goto create_failed; 2698 } 2699 free_mr->rsv_cq = cq; 2700 2701 qp_init_attr.qp_type = IB_QPT_RC; 2702 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 2703 qp_init_attr.send_cq = free_mr->rsv_cq; 2704 qp_init_attr.recv_cq = free_mr->rsv_cq; 2705 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { 2706 qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM; 2707 qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM; 2708 qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM; 2709 qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM; 2710 2711 qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr); 2712 if (IS_ERR(qp)) { 2713 ibdev_err(ibdev, "failed to create qp for free mr.\n"); 2714 ret = PTR_ERR(qp); 2715 goto create_failed; 2716 } 2717 2718 free_mr->rsv_qp[i] = qp; 2719 } 2720 2721 return 0; 2722 2723 create_failed: 2724 free_mr_exit(hr_dev); 2725 2726 return ret; 2727 } 2728 2729 static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, 2730 struct ib_qp_attr *attr, int sl_num) 2731 { 2732 struct hns_roce_v2_priv *priv = hr_dev->priv; 2733 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2734 struct ib_device *ibdev = &hr_dev->ib_dev; 2735 struct hns_roce_qp *hr_qp; 2736 int loopback; 2737 int mask; 2738 int ret; 2739 2740 hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]); 2741 hr_qp->free_mr_en = 1; 2742 2743 mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS; 2744 attr->qp_state = IB_QPS_INIT; 2745 attr->port_num = 1; 2746 attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; 2747 ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); 2748 if (ret) { 2749 ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n", 2750 ret); 2751 return ret; 2752 } 2753 2754 loopback = hr_dev->loop_idc; 2755 /* Set qpc lbi = 1 to indicate loopback IO */ 2756 hr_dev->loop_idc = 1; 2757 2758 mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | 2759 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; 2760 attr->qp_state = IB_QPS_RTR; 2761 attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 2762 attr->path_mtu = IB_MTU_256; 2763 attr->dest_qp_num = hr_qp->qpn; 2764 attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN; 2765 2766 rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num); 2767 2768 ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); 2769 hr_dev->loop_idc = loopback; 2770 if (ret) { 2771 ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n", 2772 ret); 2773 return ret; 2774 } 2775 2776 mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT | 2777 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC; 2778 attr->qp_state = IB_QPS_RTS; 2779
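/* The RTS transition also needs the SQ PSN, retry count and timeout set below. */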
attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN; 2780 attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT; 2781 attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT; 2782 ret = ib_modify_qp(&hr_qp->ibqp, attr, mask); 2783 if (ret) 2784 ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n", 2785 ret); 2786 2787 return ret; 2788 } 2789 2790 static int free_mr_modify_qp(struct hns_roce_dev *hr_dev) 2791 { 2792 struct hns_roce_v2_priv *priv = hr_dev->priv; 2793 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 2794 struct ib_qp_attr attr = {}; 2795 int ret; 2796 int i; 2797 2798 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); 2799 rdma_ah_set_static_rate(&attr.ah_attr, 3); 2800 rdma_ah_set_port_num(&attr.ah_attr, 1); 2801 2802 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { 2803 ret = free_mr_modify_rsv_qp(hr_dev, &attr, i); 2804 if (ret) 2805 return ret; 2806 } 2807 2808 return 0; 2809 } 2810 2811 static int free_mr_init(struct hns_roce_dev *hr_dev) 2812 { 2813 int ret; 2814 2815 ret = free_mr_alloc_res(hr_dev); 2816 if (ret) 2817 return ret; 2818 2819 ret = free_mr_modify_qp(hr_dev); 2820 if (ret) 2821 goto err_modify_qp; 2822 2823 return 0; 2824 2825 err_modify_qp: 2826 free_mr_exit(hr_dev); 2827 2828 return ret; 2829 } 2830 2831 static int get_hem_table(struct hns_roce_dev *hr_dev) 2832 { 2833 unsigned int qpc_count; 2834 unsigned int cqc_count; 2835 unsigned int gmv_count; 2836 int ret; 2837 int i; 2838 2839 /* Alloc memory for source address table buffer space chunk */ 2840 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num; 2841 gmv_count++) { 2842 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count); 2843 if (ret) 2844 goto err_gmv_failed; 2845 } 2846 2847 if (hr_dev->is_vf) 2848 return 0; 2849 2850 /* Alloc memory for QPC Timer buffer space chunk */ 2851 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; 2852 qpc_count++) { 2853 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table, 2854 qpc_count); 2855 if (ret) { 2856 dev_err(hr_dev->dev, "QPC Timer get failed\n"); 2857 goto err_qpc_timer_failed; 2858 } 2859 } 2860 2861 /* Alloc memory for CQC Timer buffer space chunk */ 2862 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; 2863 cqc_count++) { 2864 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table, 2865 cqc_count); 2866 if (ret) { 2867 dev_err(hr_dev->dev, "CQC Timer get failed\n"); 2868 goto err_cqc_timer_failed; 2869 } 2870 } 2871 2872 return 0; 2873 2874 err_cqc_timer_failed: 2875 for (i = 0; i < cqc_count; i++) 2876 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i); 2877 2878 err_qpc_timer_failed: 2879 for (i = 0; i < qpc_count; i++) 2880 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i); 2881 2882 err_gmv_failed: 2883 for (i = 0; i < gmv_count; i++) 2884 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i); 2885 2886 return ret; 2887 } 2888 2889 static void put_hem_table(struct hns_roce_dev *hr_dev) 2890 { 2891 int i; 2892 2893 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++) 2894 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i); 2895 2896 if (hr_dev->is_vf) 2897 return; 2898 2899 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++) 2900 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i); 2901 2902 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++) 2903 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i); 2904 } 2905 2906 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) 2907 { 2908 int ret; 2909 2910 /* The hns ROCEE requires the extdb info to be cleared before use */ 2911 ret =
hns_roce_clear_extdb_list_info(hr_dev); 2912 if (ret) 2913 return ret; 2914 2915 ret = get_hem_table(hr_dev); 2916 if (ret) 2917 return ret; 2918 2919 if (hr_dev->is_vf) 2920 return 0; 2921 2922 ret = hns_roce_init_link_table(hr_dev); 2923 if (ret) { 2924 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret); 2925 goto err_llm_init_failed; 2926 } 2927 2928 return 0; 2929 2930 err_llm_init_failed: 2931 put_hem_table(hr_dev); 2932 2933 return ret; 2934 } 2935 2936 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) 2937 { 2938 hns_roce_function_clear(hr_dev); 2939 2940 if (!hr_dev->is_vf) 2941 hns_roce_free_link_table(hr_dev); 2942 2943 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09) 2944 free_dip_list(hr_dev); 2945 } 2946 2947 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, 2948 struct hns_roce_mbox_msg *mbox_msg) 2949 { 2950 struct hns_roce_cmq_desc desc; 2951 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data; 2952 2953 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); 2954 2955 mb->in_param_l = cpu_to_le32(mbox_msg->in_param); 2956 mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32); 2957 mb->out_param_l = cpu_to_le32(mbox_msg->out_param); 2958 mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32); 2959 mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd); 2960 mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 | 2961 mbox_msg->token); 2962 2963 return hns_roce_cmq_send(hr_dev, &desc, 1); 2964 } 2965 2966 static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, 2967 u8 *complete_status) 2968 { 2969 struct hns_roce_mbox_status *mb_st; 2970 struct hns_roce_cmq_desc desc; 2971 unsigned long end; 2972 int ret = -EBUSY; 2973 u32 status; 2974 bool busy; 2975 2976 mb_st = (struct hns_roce_mbox_status *)desc.data; 2977 end = msecs_to_jiffies(timeout) + jiffies; 2978 while (v2_chk_mbox_is_avail(hr_dev, &busy)) { 2979 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) 2980 return -EIO; 2981 2982 status = 0; 2983 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, 2984 true); 2985 ret = __hns_roce_cmq_send(hr_dev, &desc, 1); 2986 if (!ret) { 2987 status = le32_to_cpu(mb_st->mb_status_hw_run); 2988 /* No pending message exists in ROCEE mbox. */ 2989 if (!(status & MB_ST_HW_RUN_M)) 2990 break; 2991 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) { 2992 break; 2993 } 2994 2995 if (time_after(jiffies, end)) { 2996 dev_err_ratelimited(hr_dev->dev, 2997 "failed to wait mbox status 0x%x\n", 2998 status); 2999 return -ETIMEDOUT; 3000 } 3001 3002 cond_resched(); 3003 ret = -EBUSY; 3004 } 3005 3006 if (!ret) { 3007 *complete_status = (u8)(status & MB_ST_COMPLETE_M); 3008 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) { 3009 /* Ignore all errors if the mbox is unavailable. 
*/ 3010 ret = 0; 3011 *complete_status = MB_ST_COMPLETE_M; 3012 } 3013 3014 return ret; 3015 } 3016 3017 static int v2_post_mbox(struct hns_roce_dev *hr_dev, 3018 struct hns_roce_mbox_msg *mbox_msg) 3019 { 3020 u8 status = 0; 3021 int ret; 3022 3023 /* Waiting for the mbox to be idle */ 3024 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS, 3025 &status); 3026 if (unlikely(ret)) { 3027 dev_err_ratelimited(hr_dev->dev, 3028 "failed to check post mbox status = 0x%x, ret = %d.\n", 3029 status, ret); 3030 return ret; 3031 } 3032 3033 /* Post new message to mbox */ 3034 ret = hns_roce_mbox_post(hr_dev, mbox_msg); 3035 if (ret) 3036 dev_err_ratelimited(hr_dev->dev, 3037 "failed to post mailbox, ret = %d.\n", ret); 3038 3039 return ret; 3040 } 3041 3042 static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev) 3043 { 3044 u8 status = 0; 3045 int ret; 3046 3047 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS, 3048 &status); 3049 if (!ret) { 3050 if (status != MB_ST_COMPLETE_SUCC) 3051 return -EBUSY; 3052 } else { 3053 dev_err_ratelimited(hr_dev->dev, 3054 "failed to check mbox status = 0x%x, ret = %d.\n", 3055 status, ret); 3056 } 3057 3058 return ret; 3059 } 3060 3061 static void copy_gid(void *dest, const union ib_gid *gid) 3062 { 3063 #define GID_SIZE 4 3064 const union ib_gid *src = gid; 3065 __le32 (*p)[GID_SIZE] = dest; 3066 int i; 3067 3068 if (!gid) 3069 src = &zgid; 3070 3071 for (i = 0; i < GID_SIZE; i++) 3072 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]); 3073 } 3074 3075 static int config_sgid_table(struct hns_roce_dev *hr_dev, 3076 int gid_index, const union ib_gid *gid, 3077 enum hns_roce_sgid_type sgid_type) 3078 { 3079 struct hns_roce_cmq_desc desc; 3080 struct hns_roce_cfg_sgid_tb *sgid_tb = 3081 (struct hns_roce_cfg_sgid_tb *)desc.data; 3082 3083 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false); 3084 3085 hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index); 3086 hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type); 3087 3088 copy_gid(&sgid_tb->vf_sgid_l, gid); 3089 3090 return hns_roce_cmq_send(hr_dev, &desc, 1); 3091 } 3092 3093 static int config_gmv_table(struct hns_roce_dev *hr_dev, 3094 int gid_index, const union ib_gid *gid, 3095 enum hns_roce_sgid_type sgid_type, 3096 const struct ib_gid_attr *attr) 3097 { 3098 struct hns_roce_cmq_desc desc[2]; 3099 struct hns_roce_cfg_gmv_tb_a *tb_a = 3100 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data; 3101 struct hns_roce_cfg_gmv_tb_b *tb_b = 3102 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data; 3103 3104 u16 vlan_id = VLAN_CFI_MASK; 3105 u8 mac[ETH_ALEN] = {}; 3106 int ret; 3107 3108 if (gid) { 3109 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac); 3110 if (ret) 3111 return ret; 3112 } 3113 3114 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false); 3115 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); 3116 3117 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false); 3118 3119 copy_gid(&tb_a->vf_sgid_l, gid); 3120 3121 hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type); 3122 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK); 3123 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id); 3124 3125 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac); 3126 3127 hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]); 3128 hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index); 3129 3130 return hns_roce_cmq_send(hr_dev, desc, 2); 3131 } 3132 3133 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index, 3134 const 
union ib_gid *gid, 3135 const struct ib_gid_attr *attr) 3136 { 3137 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1; 3138 int ret; 3139 3140 if (gid) { 3141 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { 3142 if (ipv6_addr_v4mapped((void *)gid)) 3143 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4; 3144 else 3145 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6; 3146 } else if (attr->gid_type == IB_GID_TYPE_ROCE) { 3147 sgid_type = GID_TYPE_FLAG_ROCE_V1; 3148 } 3149 } 3150 3151 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) 3152 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr); 3153 else 3154 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type); 3155 3156 if (ret) 3157 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n", 3158 ret); 3159 3160 return ret; 3161 } 3162 3163 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, 3164 const u8 *addr) 3165 { 3166 struct hns_roce_cmq_desc desc; 3167 struct hns_roce_cfg_smac_tb *smac_tb = 3168 (struct hns_roce_cfg_smac_tb *)desc.data; 3169 u16 reg_smac_h; 3170 u32 reg_smac_l; 3171 3172 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false); 3173 3174 reg_smac_l = *(u32 *)(&addr[0]); 3175 reg_smac_h = *(u16 *)(&addr[4]); 3176 3177 hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port); 3178 hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h); 3179 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); 3180 3181 return hns_roce_cmq_send(hr_dev, &desc, 1); 3182 } 3183 3184 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, 3185 struct hns_roce_v2_mpt_entry *mpt_entry, 3186 struct hns_roce_mr *mr) 3187 { 3188 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 }; 3189 struct ib_device *ibdev = &hr_dev->ib_dev; 3190 dma_addr_t pbl_ba; 3191 int i, count; 3192 3193 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, 3194 ARRAY_SIZE(pages), &pbl_ba); 3195 if (count < 1) { 3196 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", 3197 count); 3198 return -ENOBUFS; 3199 } 3200 3201 /* Aligned to the hardware address access unit */ 3202 for (i = 0; i < count; i++) 3203 pages[i] >>= 6; 3204 3205 mpt_entry->pbl_size = cpu_to_le32(mr->npages); 3206 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); 3207 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); 3208 3209 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); 3210 hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0])); 3211 3212 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); 3213 hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1])); 3214 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, 3215 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); 3216 3217 return 0; 3218 } 3219 3220 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, 3221 void *mb_buf, struct hns_roce_mr *mr) 3222 { 3223 struct hns_roce_v2_mpt_entry *mpt_entry; 3224 3225 mpt_entry = mb_buf; 3226 memset(mpt_entry, 0, sizeof(*mpt_entry)); 3227 3228 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); 3229 hr_reg_write(mpt_entry, MPT_PD, mr->pd); 3230 hr_reg_enable(mpt_entry, MPT_L_INV_EN); 3231 3232 hr_reg_write_bool(mpt_entry, MPT_BIND_EN, 3233 mr->access & IB_ACCESS_MW_BIND); 3234 hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN, 3235 mr->access & IB_ACCESS_REMOTE_ATOMIC); 3236 hr_reg_write_bool(mpt_entry, MPT_RR_EN, 3237 mr->access & IB_ACCESS_REMOTE_READ); 3238 hr_reg_write_bool(mpt_entry, MPT_RW_EN, 3239 mr->access & IB_ACCESS_REMOTE_WRITE); 3240 hr_reg_write_bool(mpt_entry, MPT_LW_EN, 3241 mr->access & IB_ACCESS_LOCAL_WRITE); 3242 3243 
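/* Record the length, key and IOVA; DMA MRs have no PBL, so they return before the PBL fields below are configured. */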
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); 3244 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); 3245 mpt_entry->lkey = cpu_to_le32(mr->key); 3246 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova)); 3247 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova)); 3248 3249 if (mr->type != MR_TYPE_MR) 3250 hr_reg_enable(mpt_entry, MPT_PA); 3251 3252 if (mr->type == MR_TYPE_DMA) 3253 return 0; 3254 3255 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0) 3256 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num); 3257 3258 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, 3259 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); 3260 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD); 3261 3262 return set_mtpt_pbl(hr_dev, mpt_entry, mr); 3263 } 3264 3265 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, 3266 struct hns_roce_mr *mr, int flags, 3267 void *mb_buf) 3268 { 3269 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; 3270 u32 mr_access_flags = mr->access; 3271 int ret = 0; 3272 3273 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); 3274 hr_reg_write(mpt_entry, MPT_PD, mr->pd); 3275 3276 if (flags & IB_MR_REREG_ACCESS) { 3277 hr_reg_write(mpt_entry, MPT_BIND_EN, 3278 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0)); 3279 hr_reg_write(mpt_entry, MPT_ATOMIC_EN, 3280 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); 3281 hr_reg_write(mpt_entry, MPT_RR_EN, 3282 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0); 3283 hr_reg_write(mpt_entry, MPT_RW_EN, 3284 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0); 3285 hr_reg_write(mpt_entry, MPT_LW_EN, 3286 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0); 3287 } 3288 3289 if (flags & IB_MR_REREG_TRANS) { 3290 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova)); 3291 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova)); 3292 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); 3293 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); 3294 3295 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); 3296 } 3297 3298 return ret; 3299 } 3300 3301 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, 3302 void *mb_buf, struct hns_roce_mr *mr) 3303 { 3304 struct ib_device *ibdev = &hr_dev->ib_dev; 3305 struct hns_roce_v2_mpt_entry *mpt_entry; 3306 dma_addr_t pbl_ba = 0; 3307 3308 mpt_entry = mb_buf; 3309 memset(mpt_entry, 0, sizeof(*mpt_entry)); 3310 3311 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) { 3312 ibdev_err(ibdev, "failed to find frmr mtr.\n"); 3313 return -ENOBUFS; 3314 } 3315 3316 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); 3317 hr_reg_write(mpt_entry, MPT_PD, mr->pd); 3318 3319 hr_reg_enable(mpt_entry, MPT_RA_EN); 3320 hr_reg_enable(mpt_entry, MPT_R_INV_EN); 3321 hr_reg_enable(mpt_entry, MPT_L_INV_EN); 3322 3323 hr_reg_enable(mpt_entry, MPT_FRE); 3324 hr_reg_clear(mpt_entry, MPT_MR_MW); 3325 hr_reg_enable(mpt_entry, MPT_BPD); 3326 hr_reg_clear(mpt_entry, MPT_PA); 3327 3328 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1); 3329 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, 3330 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); 3331 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, 3332 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); 3333 3334 mpt_entry->pbl_size = cpu_to_le32(mr->npages); 3335 3336 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3)); 3337 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); 3338 3339 return 0; 3340 } 3341 3342 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) 3343 { 3344 struct hns_roce_v2_mpt_entry 
*mpt_entry; 3345 3346 mpt_entry = mb_buf; 3347 memset(mpt_entry, 0, sizeof(*mpt_entry)); 3348 3349 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); 3350 hr_reg_write(mpt_entry, MPT_PD, mw->pdn); 3351 3352 hr_reg_enable(mpt_entry, MPT_R_INV_EN); 3353 hr_reg_enable(mpt_entry, MPT_L_INV_EN); 3354 hr_reg_enable(mpt_entry, MPT_LW_EN); 3355 3356 hr_reg_enable(mpt_entry, MPT_MR_MW); 3357 hr_reg_enable(mpt_entry, MPT_BPD); 3358 hr_reg_clear(mpt_entry, MPT_PA); 3359 hr_reg_write(mpt_entry, MPT_BQP, 3360 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); 3361 3362 mpt_entry->lkey = cpu_to_le32(mw->rkey); 3363 3364 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 3365 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : 3366 mw->pbl_hop_num); 3367 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, 3368 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); 3369 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, 3370 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); 3371 3372 return 0; 3373 } 3374 3375 static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp) 3376 { 3377 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); 3378 struct ib_device *ibdev = &hr_dev->ib_dev; 3379 const struct ib_send_wr *bad_wr; 3380 struct ib_rdma_wr rdma_wr = {}; 3381 struct ib_send_wr *send_wr; 3382 int ret; 3383 3384 send_wr = &rdma_wr.wr; 3385 send_wr->opcode = IB_WR_RDMA_WRITE; 3386 3387 ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr); 3388 if (ret) { 3389 ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n", 3390 ret); 3391 return ret; 3392 } 3393 3394 return 0; 3395 } 3396 3397 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, 3398 struct ib_wc *wc); 3399 3400 static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) 3401 { 3402 struct hns_roce_v2_priv *priv = hr_dev->priv; 3403 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; 3404 struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)]; 3405 struct ib_device *ibdev = &hr_dev->ib_dev; 3406 struct hns_roce_qp *hr_qp; 3407 unsigned long end; 3408 int cqe_cnt = 0; 3409 int npolled; 3410 int ret; 3411 int i; 3412 3413 /* 3414 * If the device initialization is not complete or in the uninstall 3415 * process, then there is no need to execute free mr. 
3416 */ 3417 if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT || 3418 priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT || 3419 hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) 3420 return; 3421 3422 mutex_lock(&free_mr->mutex); 3423 3424 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { 3425 hr_qp = to_hr_qp(free_mr->rsv_qp[i]); 3426 3427 ret = free_mr_post_send_lp_wqe(hr_qp); 3428 if (ret) { 3429 ibdev_err(ibdev, 3430 "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n", 3431 hr_qp->qpn, ret); 3432 break; 3433 } 3434 3435 cqe_cnt++; 3436 } 3437 3438 end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies; 3439 while (cqe_cnt) { 3440 npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc); 3441 if (npolled < 0) { 3442 ibdev_err(ibdev, 3443 "failed to poll cqe for free mr, remain %d cqe.\n", 3444 cqe_cnt); 3445 goto out; 3446 } 3447 3448 if (time_after(jiffies, end)) { 3449 ibdev_err(ibdev, 3450 "failed to poll cqe for free mr and timeout, remain %d cqe.\n", 3451 cqe_cnt); 3452 goto out; 3453 } 3454 cqe_cnt -= npolled; 3455 } 3456 3457 out: 3458 mutex_unlock(&free_mr->mutex); 3459 } 3460 3461 static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev) 3462 { 3463 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) 3464 free_mr_send_cmd_to_hw(hr_dev); 3465 } 3466 3467 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) 3468 { 3469 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size); 3470 } 3471 3472 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n) 3473 { 3474 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe); 3475 3476 /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */ 3477 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe : 3478 NULL; 3479 } 3480 3481 static inline void update_cq_db(struct hns_roce_dev *hr_dev, 3482 struct hns_roce_cq *hr_cq) 3483 { 3484 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) { 3485 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M; 3486 } else { 3487 struct hns_roce_v2_db cq_db = {}; 3488 3489 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn); 3490 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB); 3491 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index); 3492 hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1); 3493 3494 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg); 3495 } 3496 } 3497 3498 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, 3499 struct hns_roce_srq *srq) 3500 { 3501 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); 3502 struct hns_roce_v2_cqe *cqe, *dest; 3503 u32 prod_index; 3504 int nfreed = 0; 3505 int wqe_index; 3506 u8 owner_bit; 3507 3508 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index); 3509 ++prod_index) { 3510 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe) 3511 break; 3512 } 3513 3514 /* 3515 * Now backwards through the CQ, removing CQ entries 3516 * that match our QP by overwriting them with next entries. 
3517 */ 3518 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) { 3519 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe); 3520 if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) { 3521 if (srq && hr_reg_read(cqe, CQE_S_R)) { 3522 wqe_index = hr_reg_read(cqe, CQE_WQE_IDX); 3523 hns_roce_free_srq_wqe(srq, wqe_index); 3524 } 3525 ++nfreed; 3526 } else if (nfreed) { 3527 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) & 3528 hr_cq->ib_cq.cqe); 3529 owner_bit = hr_reg_read(dest, CQE_OWNER); 3530 memcpy(dest, cqe, hr_cq->cqe_size); 3531 hr_reg_write(dest, CQE_OWNER, owner_bit); 3532 } 3533 } 3534 3535 if (nfreed) { 3536 hr_cq->cons_index += nfreed; 3537 update_cq_db(hr_dev, hr_cq); 3538 } 3539 } 3540 3541 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, 3542 struct hns_roce_srq *srq) 3543 { 3544 spin_lock_irq(&hr_cq->lock); 3545 __hns_roce_v2_cq_clean(hr_cq, qpn, srq); 3546 spin_unlock_irq(&hr_cq->lock); 3547 } 3548 3549 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, 3550 struct hns_roce_cq *hr_cq, void *mb_buf, 3551 u64 *mtts, dma_addr_t dma_handle) 3552 { 3553 struct hns_roce_v2_cq_context *cq_context; 3554 3555 cq_context = mb_buf; 3556 memset(cq_context, 0, sizeof(*cq_context)); 3557 3558 hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID); 3559 hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED); 3560 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth)); 3561 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector); 3562 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn); 3563 3564 if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE) 3565 hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B); 3566 3567 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH) 3568 hr_reg_enable(cq_context, CQC_STASH); 3569 3570 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L, 3571 to_hr_hw_page_addr(mtts[0])); 3572 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H, 3573 upper_32_bits(to_hr_hw_page_addr(mtts[0]))); 3574 hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num == 3575 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num); 3576 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L, 3577 to_hr_hw_page_addr(mtts[1])); 3578 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H, 3579 upper_32_bits(to_hr_hw_page_addr(mtts[1]))); 3580 hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ, 3581 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift)); 3582 hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ, 3583 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift)); 3584 hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3); 3585 hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3))); 3586 hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN, 3587 hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB); 3588 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L, 3589 ((u32)hr_cq->db.dma) >> 1); 3590 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H, 3591 hr_cq->db.dma >> 32); 3592 hr_reg_write(cq_context, CQC_CQ_MAX_CNT, 3593 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM); 3594 hr_reg_write(cq_context, CQC_CQ_PERIOD, 3595 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL); 3596 } 3597 3598 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, 3599 enum ib_cq_notify_flags flags) 3600 { 3601 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); 3602 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); 3603 struct hns_roce_v2_db cq_db = {}; 3604 u32 notify_flag; 3605 3606 /* 3607 * flags = 0, then notify_flag : next 3608 * flags = 1, then notify flag : solocited 3609 */ 3610 notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
3611 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL; 3612 3613 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn); 3614 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY); 3615 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index); 3616 hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn); 3617 hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag); 3618 3619 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg); 3620 3621 return 0; 3622 } 3623 3624 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe, 3625 struct hns_roce_qp *qp, 3626 struct ib_wc *wc) 3627 { 3628 struct hns_roce_rinl_sge *sge_list; 3629 u32 wr_num, wr_cnt, sge_num; 3630 u32 sge_cnt, data_len, size; 3631 void *wqe_buf; 3632 3633 wr_num = hr_reg_read(cqe, CQE_WQE_IDX); 3634 wr_cnt = wr_num & (qp->rq.wqe_cnt - 1); 3635 3636 sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list; 3637 sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt; 3638 wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt); 3639 data_len = wc->byte_len; 3640 3641 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) { 3642 size = min(sge_list[sge_cnt].len, data_len); 3643 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size); 3644 3645 data_len -= size; 3646 wqe_buf += size; 3647 } 3648 3649 if (unlikely(data_len)) { 3650 wc->status = IB_WC_LOC_LEN_ERR; 3651 return -EAGAIN; 3652 } 3653 3654 return 0; 3655 } 3656 3657 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq, 3658 int num_entries, struct ib_wc *wc) 3659 { 3660 unsigned int left; 3661 int npolled = 0; 3662 3663 left = wq->head - wq->tail; 3664 if (left == 0) 3665 return 0; 3666 3667 left = min_t(unsigned int, (unsigned int)num_entries, left); 3668 while (npolled < left) { 3669 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 3670 wc->status = IB_WC_WR_FLUSH_ERR; 3671 wc->vendor_err = 0; 3672 wc->qp = &hr_qp->ibqp; 3673 3674 wq->tail++; 3675 wc++; 3676 npolled++; 3677 } 3678 3679 return npolled; 3680 } 3681 3682 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries, 3683 struct ib_wc *wc) 3684 { 3685 struct hns_roce_qp *hr_qp; 3686 int npolled = 0; 3687 3688 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) { 3689 npolled += sw_comp(hr_qp, &hr_qp->sq, 3690 num_entries - npolled, wc + npolled); 3691 if (npolled >= num_entries) 3692 goto out; 3693 } 3694 3695 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) { 3696 npolled += sw_comp(hr_qp, &hr_qp->rq, 3697 num_entries - npolled, wc + npolled); 3698 if (npolled >= num_entries) 3699 goto out; 3700 } 3701 3702 out: 3703 return npolled; 3704 } 3705 3706 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, 3707 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe, 3708 struct ib_wc *wc) 3709 { 3710 static const struct { 3711 u32 cqe_status; 3712 enum ib_wc_status wc_status; 3713 } map[] = { 3714 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS }, 3715 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR }, 3716 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR }, 3717 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR }, 3718 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR }, 3719 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR }, 3720 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR }, 3721 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR }, 3722 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR }, 3723 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR }, 3724 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR }, 3725 { 
HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR, 3726 IB_WC_RETRY_EXC_ERR }, 3727 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR }, 3728 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR }, 3729 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR} 3730 }; 3731 3732 u32 cqe_status = hr_reg_read(cqe, CQE_STATUS); 3733 int i; 3734 3735 wc->status = IB_WC_GENERAL_ERR; 3736 for (i = 0; i < ARRAY_SIZE(map); i++) 3737 if (cqe_status == map[i].cqe_status) { 3738 wc->status = map[i].wc_status; 3739 break; 3740 } 3741 3742 if (likely(wc->status == IB_WC_SUCCESS || 3743 wc->status == IB_WC_WR_FLUSH_ERR)) 3744 return; 3745 3746 ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status); 3747 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe, 3748 cq->cqe_size, false); 3749 wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS); 3750 3751 /* 3752 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in 3753 * the standard protocol. The driver must ignore it and need not set 3754 * the QP to an error state. 3755 */ 3756 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR) 3757 return; 3758 3759 flush_cqe(hr_dev, qp); 3760 } 3761 3762 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe, 3763 struct hns_roce_qp **cur_qp) 3764 { 3765 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); 3766 struct hns_roce_qp *hr_qp = *cur_qp; 3767 u32 qpn; 3768 3769 qpn = hr_reg_read(cqe, CQE_LCL_QPN); 3770 3771 if (!hr_qp || qpn != hr_qp->qpn) { 3772 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); 3773 if (unlikely(!hr_qp)) { 3774 ibdev_err(&hr_dev->ib_dev, 3775 "CQ %06lx with entry for unknown QPN %06x\n", 3776 hr_cq->cqn, qpn); 3777 return -EINVAL; 3778 } 3779 *cur_qp = hr_qp; 3780 } 3781 3782 return 0; 3783 } 3784 3785 /* 3786 * mapped-value = 1 + real-value 3787 * The ib wc opcode's real value starts from 0. In order to distinguish 3788 * between initialized and uninitialized map values, we add 1 to the real 3789 * value when defining the mapping, so that validity can be identified by 3790 * checking whether the mapped value is greater than 0. 3791 */ 3792 #define HR_WC_OP_MAP(hr_key, ib_key) \ 3793 [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key 3794 3795 static const u32 wc_send_op_map[] = { 3796 HR_WC_OP_MAP(SEND, SEND), 3797 HR_WC_OP_MAP(SEND_WITH_INV, SEND), 3798 HR_WC_OP_MAP(SEND_WITH_IMM, SEND), 3799 HR_WC_OP_MAP(RDMA_READ, RDMA_READ), 3800 HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE), 3801 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE), 3802 HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV), 3803 HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP), 3804 HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD), 3805 HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP), 3806 HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD), 3807 HR_WC_OP_MAP(FAST_REG_PMR, REG_MR), 3808 HR_WC_OP_MAP(BIND_MW, REG_MR), 3809 }; 3810 3811 static int to_ib_wc_send_op(u32 hr_opcode) 3812 { 3813 if (hr_opcode >= ARRAY_SIZE(wc_send_op_map)) 3814 return -EINVAL; 3815 3816 return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 : 3817 -EINVAL; 3818 } 3819 3820 static const u32 wc_recv_op_map[] = { 3821 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM), 3822 HR_WC_OP_MAP(SEND, RECV), 3823 HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM), 3824 HR_WC_OP_MAP(SEND_WITH_INV, RECV), 3825 }; 3826 3827 static int to_ib_wc_recv_op(u32 hr_opcode) 3828 { 3829 if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map)) 3830 return -EINVAL; 3831 3832 return wc_recv_op_map[hr_opcode] ?
wc_recv_op_map[hr_opcode] - 1 : 3833 -EINVAL; 3834 } 3835 3836 static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) 3837 { 3838 u32 hr_opcode; 3839 int ib_opcode; 3840 3841 wc->wc_flags = 0; 3842 3843 hr_opcode = hr_reg_read(cqe, CQE_OPCODE); 3844 switch (hr_opcode) { 3845 case HNS_ROCE_V2_WQE_OP_RDMA_READ: 3846 wc->byte_len = le32_to_cpu(cqe->byte_cnt); 3847 break; 3848 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM: 3849 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM: 3850 wc->wc_flags |= IB_WC_WITH_IMM; 3851 break; 3852 case HNS_ROCE_V2_WQE_OP_LOCAL_INV: 3853 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 3854 break; 3855 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP: 3856 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD: 3857 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP: 3858 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD: 3859 wc->byte_len = 8; 3860 break; 3861 default: 3862 break; 3863 } 3864 3865 ib_opcode = to_ib_wc_send_op(hr_opcode); 3866 if (ib_opcode < 0) 3867 wc->status = IB_WC_GENERAL_ERR; 3868 else 3869 wc->opcode = ib_opcode; 3870 } 3871 3872 static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode, 3873 struct hns_roce_v2_cqe *cqe) 3874 { 3875 return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI && 3876 (hr_opcode == HNS_ROCE_V2_OPCODE_SEND || 3877 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM || 3878 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) && 3879 hr_reg_read(cqe, CQE_RQ_INLINE); 3880 } 3881 3882 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) 3883 { 3884 struct hns_roce_qp *qp = to_hr_qp(wc->qp); 3885 u32 hr_opcode; 3886 int ib_opcode; 3887 int ret; 3888 3889 wc->byte_len = le32_to_cpu(cqe->byte_cnt); 3890 3891 hr_opcode = hr_reg_read(cqe, CQE_OPCODE); 3892 switch (hr_opcode) { 3893 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM: 3894 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM: 3895 wc->wc_flags = IB_WC_WITH_IMM; 3896 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata)); 3897 break; 3898 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV: 3899 wc->wc_flags = IB_WC_WITH_INVALIDATE; 3900 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey); 3901 break; 3902 default: 3903 wc->wc_flags = 0; 3904 } 3905 3906 ib_opcode = to_ib_wc_recv_op(hr_opcode); 3907 if (ib_opcode < 0) 3908 wc->status = IB_WC_GENERAL_ERR; 3909 else 3910 wc->opcode = ib_opcode; 3911 3912 if (is_rq_inl_enabled(wc, hr_opcode, cqe)) { 3913 ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc); 3914 if (unlikely(ret)) 3915 return ret; 3916 } 3917 3918 wc->sl = hr_reg_read(cqe, CQE_SL); 3919 wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN); 3920 wc->slid = 0; 3921 wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? 
IB_WC_GRH : 0; 3922 wc->port_num = hr_reg_read(cqe, CQE_PORTN); 3923 wc->pkey_index = 0; 3924 3925 if (hr_reg_read(cqe, CQE_VID_VLD)) { 3926 wc->vlan_id = hr_reg_read(cqe, CQE_VID); 3927 wc->wc_flags |= IB_WC_WITH_VLAN; 3928 } else { 3929 wc->vlan_id = 0xffff; 3930 } 3931 3932 wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE); 3933 3934 return 0; 3935 } 3936 3937 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, 3938 struct hns_roce_qp **cur_qp, struct ib_wc *wc) 3939 { 3940 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); 3941 struct hns_roce_qp *qp = *cur_qp; 3942 struct hns_roce_srq *srq = NULL; 3943 struct hns_roce_v2_cqe *cqe; 3944 struct hns_roce_wq *wq; 3945 int is_send; 3946 u16 wqe_idx; 3947 int ret; 3948 3949 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index); 3950 if (!cqe) 3951 return -EAGAIN; 3952 3953 ++hr_cq->cons_index; 3954 /* Memory barrier */ 3955 rmb(); 3956 3957 ret = get_cur_qp(hr_cq, cqe, &qp); 3958 if (ret) 3959 return ret; 3960 3961 wc->qp = &qp->ibqp; 3962 wc->vendor_err = 0; 3963 3964 wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX); 3965 3966 is_send = !hr_reg_read(cqe, CQE_S_R); 3967 if (is_send) { 3968 wq = &qp->sq; 3969 3970 /* If sq_signal_bits is set, the tail pointer will be updated to 3971 * the WQE corresponding to the current CQE. 3972 */ 3973 if (qp->sq_signal_bits) 3974 wq->tail += (wqe_idx - (u16)wq->tail) & 3975 (wq->wqe_cnt - 1); 3976 3977 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 3978 ++wq->tail; 3979 3980 fill_send_wc(wc, cqe); 3981 } else { 3982 if (qp->ibqp.srq) { 3983 srq = to_hr_srq(qp->ibqp.srq); 3984 wc->wr_id = srq->wrid[wqe_idx]; 3985 hns_roce_free_srq_wqe(srq, wqe_idx); 3986 } else { 3987 wq = &qp->rq; 3988 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 3989 ++wq->tail; 3990 } 3991 3992 ret = fill_recv_wc(wc, cqe); 3993 } 3994 3995 get_cqe_status(hr_dev, qp, hr_cq, cqe, wc); 3996 if (unlikely(wc->status != IB_WC_SUCCESS)) 3997 return 0; 3998 3999 return ret; 4000 } 4001 4002 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, 4003 struct ib_wc *wc) 4004 { 4005 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); 4006 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); 4007 struct hns_roce_qp *cur_qp = NULL; 4008 unsigned long flags; 4009 int npolled; 4010 4011 spin_lock_irqsave(&hr_cq->lock, flags); 4012 4013 /* 4014 * When the device starts to reset, the state is RST_DOWN. At this time, 4015 * there may still be some valid CQEs in the hardware that have not been 4016 * polled, so it is not allowed to switch to software mode 4017 * immediately. Once the state changes to UNINIT, no CQEs remain 4018 * in the hardware, and it is safe to switch to software mode.
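* An illustrative timeline of the checks below (mirroring this function, not * an extra state): while the device is still in * HNS_ROCE_DEVICE_STATE_RST_DOWN, keep polling hardware CQEs here; only once * the state reads HNS_ROCE_DEVICE_STATE_UNINIT does the branch below fall * back to hns_roce_v2_sw_poll_cq() to flush the remaining work requests in * software.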
4019 */ 4020 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) { 4021 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc); 4022 goto out; 4023 } 4024 4025 for (npolled = 0; npolled < num_entries; ++npolled) { 4026 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled)) 4027 break; 4028 } 4029 4030 if (npolled) 4031 update_cq_db(hr_dev, hr_cq); 4032 4033 out: 4034 spin_unlock_irqrestore(&hr_cq->lock, flags); 4035 4036 return npolled; 4037 } 4038 4039 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, 4040 u32 step_idx, u8 *mbox_cmd) 4041 { 4042 u8 cmd; 4043 4044 switch (type) { 4045 case HEM_TYPE_QPC: 4046 cmd = HNS_ROCE_CMD_WRITE_QPC_BT0; 4047 break; 4048 case HEM_TYPE_MTPT: 4049 cmd = HNS_ROCE_CMD_WRITE_MPT_BT0; 4050 break; 4051 case HEM_TYPE_CQC: 4052 cmd = HNS_ROCE_CMD_WRITE_CQC_BT0; 4053 break; 4054 case HEM_TYPE_SRQC: 4055 cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0; 4056 break; 4057 case HEM_TYPE_SCCC: 4058 cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0; 4059 break; 4060 case HEM_TYPE_QPC_TIMER: 4061 cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; 4062 break; 4063 case HEM_TYPE_CQC_TIMER: 4064 cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; 4065 break; 4066 default: 4067 dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type); 4068 return -EINVAL; 4069 } 4070 4071 *mbox_cmd = cmd + step_idx; 4072 4073 return 0; 4074 } 4075 4076 static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, 4077 dma_addr_t base_addr) 4078 { 4079 struct hns_roce_cmq_desc desc; 4080 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data; 4081 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz); 4082 u64 addr = to_hr_hw_page_addr(base_addr); 4083 4084 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false); 4085 4086 hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr)); 4087 hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr)); 4088 hr_reg_write(req, CFG_GMV_BT_IDX, idx); 4089 4090 return hns_roce_cmq_send(hr_dev, &desc, 1); 4091 } 4092 4093 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, 4094 dma_addr_t base_addr, u32 hem_type, u32 step_idx) 4095 { 4096 int ret; 4097 u8 cmd; 4098 4099 if (unlikely(hem_type == HEM_TYPE_GMV)) 4100 return config_gmv_ba_to_hw(hr_dev, obj, base_addr); 4101 4102 if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx)) 4103 return 0; 4104 4105 ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd); 4106 if (ret < 0) 4107 return ret; 4108 4109 return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj); 4110 } 4111 4112 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, 4113 struct hns_roce_hem_table *table, int obj, 4114 u32 step_idx) 4115 { 4116 struct hns_roce_hem_iter iter; 4117 struct hns_roce_hem_mhop mhop; 4118 struct hns_roce_hem *hem; 4119 unsigned long mhop_obj = obj; 4120 int i, j, k; 4121 int ret = 0; 4122 u64 hem_idx = 0; 4123 u64 l1_idx = 0; 4124 u64 bt_ba = 0; 4125 u32 chunk_ba_num; 4126 u32 hop_num; 4127 4128 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) 4129 return 0; 4130 4131 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); 4132 i = mhop.l0_idx; 4133 j = mhop.l1_idx; 4134 k = mhop.l2_idx; 4135 hop_num = mhop.hop_num; 4136 chunk_ba_num = mhop.bt_chunk_size / 8; 4137 4138 if (hop_num == 2) { 4139 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num + 4140 k; 4141 l1_idx = i * chunk_ba_num + j; 4142 } else if (hop_num == 1) { 4143 hem_idx = i * chunk_ba_num + j; 4144 } else if (hop_num == HNS_ROCE_HOP_NUM_0) { 4145 hem_idx = i; 4146 } 4147 4148 if (table->type == 
HEM_TYPE_SCCC) 4149 obj = mhop.l0_idx; 4150 4151 if (check_whether_last_step(hop_num, step_idx)) { 4152 hem = table->hem[hem_idx]; 4153 for (hns_roce_hem_first(hem, &iter); 4154 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { 4155 bt_ba = hns_roce_hem_addr(&iter); 4156 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, 4157 step_idx); 4158 } 4159 } else { 4160 if (step_idx == 0) 4161 bt_ba = table->bt_l0_dma_addr[i]; 4162 else if (step_idx == 1 && hop_num == 2) 4163 bt_ba = table->bt_l1_dma_addr[l1_idx]; 4164 4165 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx); 4166 } 4167 4168 return ret; 4169 } 4170 4171 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, 4172 struct hns_roce_hem_table *table, 4173 int tag, u32 step_idx) 4174 { 4175 struct hns_roce_cmd_mailbox *mailbox; 4176 struct device *dev = hr_dev->dev; 4177 u8 cmd = 0xff; 4178 int ret; 4179 4180 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) 4181 return 0; 4182 4183 switch (table->type) { 4184 case HEM_TYPE_QPC: 4185 cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0; 4186 break; 4187 case HEM_TYPE_MTPT: 4188 cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0; 4189 break; 4190 case HEM_TYPE_CQC: 4191 cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0; 4192 break; 4193 case HEM_TYPE_SRQC: 4194 cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0; 4195 break; 4196 case HEM_TYPE_SCCC: 4197 case HEM_TYPE_QPC_TIMER: 4198 case HEM_TYPE_CQC_TIMER: 4199 case HEM_TYPE_GMV: 4200 return 0; 4201 default: 4202 dev_warn(dev, "table %u not to be destroyed by mailbox!\n", 4203 table->type); 4204 return 0; 4205 } 4206 4207 cmd += step_idx; 4208 4209 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 4210 if (IS_ERR(mailbox)) 4211 return PTR_ERR(mailbox); 4212 4213 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag); 4214 4215 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 4216 return ret; 4217 } 4218 4219 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, 4220 struct hns_roce_v2_qp_context *context, 4221 struct hns_roce_v2_qp_context *qpc_mask, 4222 struct hns_roce_qp *hr_qp) 4223 { 4224 struct hns_roce_cmd_mailbox *mailbox; 4225 int qpc_size; 4226 int ret; 4227 4228 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 4229 if (IS_ERR(mailbox)) 4230 return PTR_ERR(mailbox); 4231 4232 /* The qpc size of HIP08 is only 256B, which is half of HIP09 */ 4233 qpc_size = hr_dev->caps.qpc_sz; 4234 memcpy(mailbox->buf, context, qpc_size); 4235 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size); 4236 4237 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, 4238 HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn); 4239 4240 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 4241 4242 return ret; 4243 } 4244 4245 static void set_access_flags(struct hns_roce_qp *hr_qp, 4246 struct hns_roce_v2_qp_context *context, 4247 struct hns_roce_v2_qp_context *qpc_mask, 4248 const struct ib_qp_attr *attr, int attr_mask) 4249 { 4250 u8 dest_rd_atomic; 4251 u32 access_flags; 4252 4253 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ? 4254 attr->max_dest_rd_atomic : hr_qp->resp_depth; 4255 4256 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ? 
4257 attr->qp_access_flags : hr_qp->atomic_rd_en; 4258 4259 if (!dest_rd_atomic) 4260 access_flags &= IB_ACCESS_REMOTE_WRITE; 4261 4262 hr_reg_write_bool(context, QPC_RRE, 4263 access_flags & IB_ACCESS_REMOTE_READ); 4264 hr_reg_clear(qpc_mask, QPC_RRE); 4265 4266 hr_reg_write_bool(context, QPC_RWE, 4267 access_flags & IB_ACCESS_REMOTE_WRITE); 4268 hr_reg_clear(qpc_mask, QPC_RWE); 4269 4270 hr_reg_write_bool(context, QPC_ATE, 4271 access_flags & IB_ACCESS_REMOTE_ATOMIC); 4272 hr_reg_clear(qpc_mask, QPC_ATE); 4273 hr_reg_write_bool(context, QPC_EXT_ATE, 4274 access_flags & IB_ACCESS_REMOTE_ATOMIC); 4275 hr_reg_clear(qpc_mask, QPC_EXT_ATE); 4276 } 4277 4278 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, 4279 struct hns_roce_v2_qp_context *context, 4280 struct hns_roce_v2_qp_context *qpc_mask) 4281 { 4282 hr_reg_write(context, QPC_SGE_SHIFT, 4283 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt, 4284 hr_qp->sge.sge_shift)); 4285 4286 hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt)); 4287 4288 hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt)); 4289 } 4290 4291 static inline int get_cqn(struct ib_cq *ib_cq) 4292 { 4293 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0; 4294 } 4295 4296 static inline int get_pdn(struct ib_pd *ib_pd) 4297 { 4298 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0; 4299 } 4300 4301 static void modify_qp_reset_to_init(struct ib_qp *ibqp, 4302 const struct ib_qp_attr *attr, 4303 int attr_mask, 4304 struct hns_roce_v2_qp_context *context, 4305 struct hns_roce_v2_qp_context *qpc_mask) 4306 { 4307 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4308 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4309 4310 /* 4311 * In the v2 engine, software passes the context and the context mask to 4312 * the hardware when modifying a QP. To modify some fields in the context, 4313 * software should clear all bits of the relevant fields in the context 4314 * mask at the same time; the mask bits of fields to be preserved stay 0x1.
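* For example (purely illustrative of the convention): an update of the PD * field pairs hr_reg_write(context, QPC_PD, pdn) with * hr_reg_clear(qpc_mask, QPC_PD). For this RESET-to-INIT transition the * caller instead zeroes the whole qpc_mask in hns_roce_v2_set_abs_fields(), * so every field is taken from the context.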
4315 */ 4316 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type)); 4317 4318 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd)); 4319 4320 hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs)); 4321 4322 set_qpc_wqe_cnt(hr_qp, context, qpc_mask); 4323 4324 /* When there is no VLAN, the VLAN ID must be set to 0xFFF */ 4325 hr_reg_write(context, QPC_VLAN_ID, 0xfff); 4326 4327 if (ibqp->qp_type == IB_QPT_XRC_TGT) { 4328 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn); 4329 4330 hr_reg_enable(context, QPC_XRC_QP_TYPE); 4331 } 4332 4333 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 4334 hr_reg_enable(context, QPC_RQ_RECORD_EN); 4335 4336 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB) 4337 hr_reg_enable(context, QPC_OWNER_MODE); 4338 4339 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L, 4340 lower_32_bits(hr_qp->rdb.dma) >> 1); 4341 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H, 4342 upper_32_bits(hr_qp->rdb.dma)); 4343 4344 if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI) 4345 hr_reg_write_bool(context, QPC_RQIE, 4346 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE); 4347 4348 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq)); 4349 4350 if (ibqp->srq) { 4351 hr_reg_enable(context, QPC_SRQ_EN); 4352 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn); 4353 } 4354 4355 hr_reg_enable(context, QPC_FRE); 4356 4357 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); 4358 4359 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ) 4360 return; 4361 4362 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH) 4363 hr_reg_enable(&context->ext, QPCEX_STASH); 4364 } 4365 4366 static void modify_qp_init_to_init(struct ib_qp *ibqp, 4367 const struct ib_qp_attr *attr, int attr_mask, 4368 struct hns_roce_v2_qp_context *context, 4369 struct hns_roce_v2_qp_context *qpc_mask) 4370 { 4371 /* 4372 * In the v2 engine, software passes the context and the context mask to 4373 * the hardware when modifying a QP. To modify some fields in the context, 4374 * software should clear all bits of the relevant fields in the context 4375 * mask at the same time; the mask bits of fields to be preserved stay 0x1. 4376 */ 4377 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type)); 4378 hr_reg_clear(qpc_mask, QPC_TST); 4379 4380 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd)); 4381 hr_reg_clear(qpc_mask, QPC_PD); 4382 4383 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq)); 4384 hr_reg_clear(qpc_mask, QPC_RX_CQN); 4385 4386 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); 4387 hr_reg_clear(qpc_mask, QPC_TX_CQN); 4388 4389 if (ibqp->srq) { 4390 hr_reg_enable(context, QPC_SRQ_EN); 4391 hr_reg_clear(qpc_mask, QPC_SRQ_EN); 4392 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn); 4393 hr_reg_clear(qpc_mask, QPC_SRQN); 4394 } 4395 } 4396 4397 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, 4398 struct hns_roce_qp *hr_qp, 4399 struct hns_roce_v2_qp_context *context, 4400 struct hns_roce_v2_qp_context *qpc_mask) 4401 { 4402 u64 mtts[MTT_MIN_COUNT] = { 0 }; 4403 u64 wqe_sge_ba; 4404 int count; 4405 4406 /* Search qp buf's mtts */ 4407 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, 4408 MTT_MIN_COUNT, &wqe_sge_ba); 4409 if (hr_qp->rq.wqe_cnt && count < 1) { 4410 ibdev_err(&hr_dev->ib_dev, 4411 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn); 4412 return -EINVAL; 4413 } 4414 4415 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); 4416 qpc_mask->wqe_sge_ba = 0; 4417 4418 /* 4419 * In the v2 engine, software passes the context and the context mask to 4420 * the hardware when modifying a QP.
To modify some fields in the context, 4421 * software should clear all bits of the relevant fields in the context 4422 * mask at the same time; the mask bits of fields to be preserved stay 0x1. 4423 */ 4424 hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3)); 4425 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H); 4426 4427 hr_reg_write(context, QPC_SQ_HOP_NUM, 4428 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num, 4429 hr_qp->sq.wqe_cnt)); 4430 hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM); 4431 4432 hr_reg_write(context, QPC_SGE_HOP_NUM, 4433 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num, 4434 hr_qp->sge.sge_cnt)); 4435 hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM); 4436 4437 hr_reg_write(context, QPC_RQ_HOP_NUM, 4438 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num, 4439 hr_qp->rq.wqe_cnt)); 4440 4441 hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM); 4442 4443 hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ, 4444 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift)); 4445 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ); 4446 4447 hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ, 4448 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift)); 4449 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ); 4450 4451 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0])); 4452 qpc_mask->rq_cur_blk_addr = 0; 4453 4454 hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H, 4455 upper_32_bits(to_hr_hw_page_addr(mtts[0]))); 4456 hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H); 4457 4458 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1])); 4459 qpc_mask->rq_nxt_blk_addr = 0; 4460 4461 hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, 4462 upper_32_bits(to_hr_hw_page_addr(mtts[1]))); 4463 hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); 4464 4465 return 0; 4466 } 4467 4468 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, 4469 struct hns_roce_qp *hr_qp, 4470 struct hns_roce_v2_qp_context *context, 4471 struct hns_roce_v2_qp_context *qpc_mask) 4472 { 4473 struct ib_device *ibdev = &hr_dev->ib_dev; 4474 u64 sge_cur_blk = 0; 4475 u64 sq_cur_blk = 0; 4476 int count; 4477 4478 /* Search qp buf's mtts */ 4479 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); 4480 if (count < 1) { 4481 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n", 4482 hr_qp->qpn); 4483 return -EINVAL; 4484 } 4485 if (hr_qp->sge.sge_cnt > 0) { 4486 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 4487 hr_qp->sge.offset, 4488 &sge_cur_blk, 1, NULL); 4489 if (count < 1) { 4490 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n", 4491 hr_qp->qpn); 4492 return -EINVAL; 4493 } 4494 } 4495 4496 /* 4497 * In the v2 engine, software passes the context and the context mask to 4498 * the hardware when modifying a QP. To modify some fields in the context, 4499 * software should clear all bits of the relevant fields in the context 4500 * mask at the same time; the mask bits of fields to be preserved stay 0x1.
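* As an illustration of the convention, each 64-bit buffer page address * below is split across an _L/_H field pair, e.g. * hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L, lower_32_bits(addr)) plus * hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H, upper_32_bits(addr)) (addr * standing in for the page address), with the matching qpc_mask fields * cleared so that hardware latches the new values.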
4501 */ 4502 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L, 4503 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4504 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H, 4505 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4506 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L); 4507 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H); 4508 4509 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L, 4510 lower_32_bits(to_hr_hw_page_addr(sge_cur_blk))); 4511 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H, 4512 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk))); 4513 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L); 4514 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H); 4515 4516 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L, 4517 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4518 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H, 4519 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk))); 4520 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L); 4521 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H); 4522 4523 return 0; 4524 } 4525 4526 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp, 4527 const struct ib_qp_attr *attr) 4528 { 4529 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) 4530 return IB_MTU_4096; 4531 4532 return attr->path_mtu; 4533 } 4534 4535 static int modify_qp_init_to_rtr(struct ib_qp *ibqp, 4536 const struct ib_qp_attr *attr, int attr_mask, 4537 struct hns_roce_v2_qp_context *context, 4538 struct hns_roce_v2_qp_context *qpc_mask) 4539 { 4540 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4541 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4542 struct ib_device *ibdev = &hr_dev->ib_dev; 4543 dma_addr_t trrl_ba; 4544 dma_addr_t irrl_ba; 4545 enum ib_mtu ib_mtu; 4546 const u8 *smac; 4547 u8 lp_pktn_ini; 4548 u64 *mtts; 4549 u8 *dmac; 4550 u32 port; 4551 int mtu; 4552 int ret; 4553 4554 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask); 4555 if (ret) { 4556 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret); 4557 return ret; 4558 } 4559 4560 /* Search IRRL's mtts */ 4561 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, 4562 hr_qp->qpn, &irrl_ba); 4563 if (!mtts) { 4564 ibdev_err(ibdev, "failed to find qp irrl_table.\n"); 4565 return -EINVAL; 4566 } 4567 4568 /* Search TRRL's mtts */ 4569 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, 4570 hr_qp->qpn, &trrl_ba); 4571 if (!mtts) { 4572 ibdev_err(ibdev, "failed to find qp trrl_table.\n"); 4573 return -EINVAL; 4574 } 4575 4576 if (attr_mask & IB_QP_ALT_PATH) { 4577 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n", 4578 attr_mask); 4579 return -EINVAL; 4580 } 4581 4582 hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4); 4583 hr_reg_clear(qpc_mask, QPC_TRRL_BA_L); 4584 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4)); 4585 qpc_mask->trrl_ba = 0; 4586 hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4)); 4587 hr_reg_clear(qpc_mask, QPC_TRRL_BA_H); 4588 4589 context->irrl_ba = cpu_to_le32(irrl_ba >> 6); 4590 qpc_mask->irrl_ba = 0; 4591 hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6)); 4592 hr_reg_clear(qpc_mask, QPC_IRRL_BA_H); 4593 4594 hr_reg_enable(context, QPC_RMT_E2E); 4595 hr_reg_clear(qpc_mask, QPC_RMT_E2E); 4596 4597 hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits); 4598 hr_reg_clear(qpc_mask, QPC_SIG_TYPE); 4599 4600 port = (attr_mask & IB_QP_PORT) ? 
(attr->port_num - 1) : hr_qp->port; 4601 4602 smac = (const u8 *)hr_dev->dev_addr[port]; 4603 dmac = (u8 *)attr->ah_attr.roce.dmac; 4604 /* When dmac equals smac or loop_idc is 1, the packet should loop back */ 4605 if (ether_addr_equal_unaligned(dmac, smac) || 4606 hr_dev->loop_idc == 0x1) { 4607 hr_reg_write(context, QPC_LBI, hr_dev->loop_idc); 4608 hr_reg_clear(qpc_mask, QPC_LBI); 4609 } 4610 4611 if (attr_mask & IB_QP_DEST_QPN) { 4612 hr_reg_write(context, QPC_DQPN, attr->dest_qp_num); 4613 hr_reg_clear(qpc_mask, QPC_DQPN); 4614 } 4615 4616 memcpy(&(context->dmac), dmac, sizeof(u32)); 4617 hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4]))); 4618 qpc_mask->dmac = 0; 4619 hr_reg_clear(qpc_mask, QPC_DMAC_H); 4620 4621 ib_mtu = get_mtu(ibqp, attr); 4622 hr_qp->path_mtu = ib_mtu; 4623 4624 mtu = ib_mtu_enum_to_int(ib_mtu); 4625 if (WARN_ON(mtu <= 0)) 4626 return -EINVAL; 4627 #define MAX_LP_MSG_LEN 16384 4628 /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */ 4629 lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu); 4630 if (WARN_ON(lp_pktn_ini >= 0xF)) 4631 return -EINVAL; 4632 4633 if (attr_mask & IB_QP_PATH_MTU) { 4634 hr_reg_write(context, QPC_MTU, ib_mtu); 4635 hr_reg_clear(qpc_mask, QPC_MTU); 4636 } 4637 4638 hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini); 4639 hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI); 4640 4641 /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */ 4642 hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini); 4643 hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ); 4644 4645 hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR); 4646 hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN); 4647 hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE); 4648 4649 context->rq_rnr_timer = 0; 4650 qpc_mask->rq_rnr_timer = 0; 4651 4652 hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX); 4653 hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX); 4654 4655 /* The ROCEE sends 2^lp_sgen_ini segments each time */ 4656 hr_reg_write(context, QPC_LP_SGEN_INI, 3); 4657 hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI); 4658 4659 return 0; 4660 } 4661 4662 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, 4663 const struct ib_qp_attr *attr, int attr_mask, 4664 struct hns_roce_v2_qp_context *context, 4665 struct hns_roce_v2_qp_context *qpc_mask) 4666 { 4667 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4668 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4669 struct ib_device *ibdev = &hr_dev->ib_dev; 4670 int ret; 4671 4672 /* Alternate path and path migration are not supported */ 4673 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) { 4674 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask); 4675 return -EINVAL; 4676 } 4677 4678 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask); 4679 if (ret) { 4680 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret); 4681 return ret; 4682 } 4683 4684 /* 4685 * Clear some fields in the context mask. Because the default values 4686 * of all fields in the context are zero, we need not write the context 4687 * fields again, but we must clear the relevant bits of the context mask.
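* For instance, the hr_reg_clear(qpc_mask, QPC_CHECK_FLG) below is * sufficient on its own: with the mask bits cleared and no matching * hr_reg_write() on the context, hardware loads the field's default value * of zero.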
4688 */ 4689 hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX); 4690 4691 hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN); 4692 4693 hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE); 4694 hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD); 4695 hr_reg_clear(qpc_mask, QPC_IRRL_PSN); 4696 4697 hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL); 4698 4699 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN); 4700 4701 hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG); 4702 4703 hr_reg_clear(qpc_mask, QPC_CHECK_FLG); 4704 4705 hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD); 4706 4707 return 0; 4708 } 4709 4710 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 4711 u32 *dip_idx) 4712 { 4713 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 4714 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4715 u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx; 4716 u32 *head = &hr_dev->qp_table.idx_table.head; 4717 u32 *tail = &hr_dev->qp_table.idx_table.tail; 4718 struct hns_roce_dip *hr_dip; 4719 unsigned long flags; 4720 int ret = 0; 4721 4722 spin_lock_irqsave(&hr_dev->dip_list_lock, flags); 4723 4724 spare_idx[*tail] = ibqp->qp_num; 4725 *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1); 4726 4727 list_for_each_entry(hr_dip, &hr_dev->dip_list, node) { 4728 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) { 4729 *dip_idx = hr_dip->dip_idx; 4730 goto out; 4731 } 4732 } 4733 4734 /* If no dgid is found, a new dip and a mapping between dgid and 4735 * dip_idx will be created. 4736 */ 4737 hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC); 4738 if (!hr_dip) { 4739 ret = -ENOMEM; 4740 goto out; 4741 } 4742 4743 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); 4744 hr_dip->dip_idx = *dip_idx = spare_idx[*head]; 4745 *head = (*head == hr_dev->caps.num_qps - 1) ? 
0 : (*head + 1); 4746 list_add_tail(&hr_dip->node, &hr_dev->dip_list); 4747 4748 out: 4749 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags); 4750 return ret; 4751 } 4752 4753 enum { 4754 CONG_DCQCN, 4755 CONG_WINDOW, 4756 }; 4757 4758 enum { 4759 UNSUPPORT_CONG_LEVEL, 4760 SUPPORT_CONG_LEVEL, 4761 }; 4762 4763 enum { 4764 CONG_LDCP, 4765 CONG_HC3, 4766 }; 4767 4768 enum { 4769 DIP_INVALID, 4770 DIP_VALID, 4771 }; 4772 4773 enum { 4774 WND_LIMIT, 4775 WND_UNLIMIT, 4776 }; 4777 4778 static int check_cong_type(struct ib_qp *ibqp, 4779 struct hns_roce_congestion_algorithm *cong_alg) 4780 { 4781 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4782 4783 /* different congestion types match different configurations */ 4784 switch (hr_dev->caps.cong_type) { 4785 case CONG_TYPE_DCQCN: 4786 cong_alg->alg_sel = CONG_DCQCN; 4787 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; 4788 cong_alg->dip_vld = DIP_INVALID; 4789 cong_alg->wnd_mode_sel = WND_LIMIT; 4790 break; 4791 case CONG_TYPE_LDCP: 4792 cong_alg->alg_sel = CONG_WINDOW; 4793 cong_alg->alg_sub_sel = CONG_LDCP; 4794 cong_alg->dip_vld = DIP_INVALID; 4795 cong_alg->wnd_mode_sel = WND_UNLIMIT; 4796 break; 4797 case CONG_TYPE_HC3: 4798 cong_alg->alg_sel = CONG_WINDOW; 4799 cong_alg->alg_sub_sel = CONG_HC3; 4800 cong_alg->dip_vld = DIP_INVALID; 4801 cong_alg->wnd_mode_sel = WND_LIMIT; 4802 break; 4803 case CONG_TYPE_DIP: 4804 cong_alg->alg_sel = CONG_DCQCN; 4805 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; 4806 cong_alg->dip_vld = DIP_VALID; 4807 cong_alg->wnd_mode_sel = WND_LIMIT; 4808 break; 4809 default: 4810 ibdev_err(&hr_dev->ib_dev, 4811 "error type(%u) for congestion selection.\n", 4812 hr_dev->caps.cong_type); 4813 return -EINVAL; 4814 } 4815 4816 return 0; 4817 } 4818 4819 static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr, 4820 struct hns_roce_v2_qp_context *context, 4821 struct hns_roce_v2_qp_context *qpc_mask) 4822 { 4823 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 4824 struct hns_roce_congestion_algorithm cong_field; 4825 struct ib_device *ibdev = ibqp->device; 4826 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); 4827 u32 dip_idx = 0; 4828 int ret; 4829 4830 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 || 4831 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE) 4832 return 0; 4833 4834 ret = check_cong_type(ibqp, &cong_field); 4835 if (ret) 4836 return ret; 4837 4838 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id + 4839 hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE); 4840 hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID); 4841 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel); 4842 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL); 4843 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL, 4844 cong_field.alg_sub_sel); 4845 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL); 4846 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld); 4847 hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD); 4848 hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN, 4849 cong_field.wnd_mode_sel); 4850 hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN); 4851 4852 /* if dip is disabled, there is no need to set dip idx */ 4853 if (cong_field.dip_vld == 0) 4854 return 0; 4855 4856 ret = get_dip_ctx_idx(ibqp, attr, &dip_idx); 4857 if (ret) { 4858 ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret); 4859 return ret; 4860 } 4861 4862 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx); 4863 hr_reg_write(&qpc_mask->ext, 
QPCEX_DIP_CTX_IDX, 0); 4864 4865 return 0; 4866 } 4867 4868 static int hns_roce_v2_set_path(struct ib_qp *ibqp, 4869 const struct ib_qp_attr *attr, 4870 int attr_mask, 4871 struct hns_roce_v2_qp_context *context, 4872 struct hns_roce_v2_qp_context *qpc_mask) 4873 { 4874 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); 4875 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 4876 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 4877 struct ib_device *ibdev = &hr_dev->ib_dev; 4878 const struct ib_gid_attr *gid_attr = NULL; 4879 int is_roce_protocol; 4880 u16 vlan_id = 0xffff; 4881 bool is_udp = false; 4882 u8 ib_port; 4883 u8 hr_port; 4884 int ret; 4885 4886 /* 4887 * If free_mr_en of qp is set, it means that this qp comes from 4888 * free mr. This qp will perform the loopback operation. 4889 * In the loopback scenario, only sl needs to be set. 4890 */ 4891 if (hr_qp->free_mr_en) { 4892 hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr)); 4893 hr_reg_clear(qpc_mask, QPC_SL); 4894 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); 4895 return 0; 4896 } 4897 4898 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1; 4899 hr_port = ib_port - 1; 4900 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && 4901 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; 4902 4903 if (is_roce_protocol) { 4904 gid_attr = attr->ah_attr.grh.sgid_attr; 4905 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); 4906 if (ret) 4907 return ret; 4908 4909 is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); 4910 } 4911 4912 /* Only HIP08 needs to set the vlan_en bits in QPC */ 4913 if (vlan_id < VLAN_N_VID && 4914 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 4915 hr_reg_enable(context, QPC_RQ_VLAN_EN); 4916 hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN); 4917 hr_reg_enable(context, QPC_SQ_VLAN_EN); 4918 hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN); 4919 } 4920 4921 hr_reg_write(context, QPC_VLAN_ID, vlan_id); 4922 hr_reg_clear(qpc_mask, QPC_VLAN_ID); 4923 4924 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { 4925 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n", 4926 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); 4927 return -EINVAL; 4928 } 4929 4930 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { 4931 ibdev_err(ibdev, "ah attr is not RDMA roce type\n"); 4932 return -EINVAL; 4933 } 4934 4935 hr_reg_write(context, QPC_UDPSPN, 4936 is_udp ? 
rdma_get_udp_sport(grh->flow_label, ibqp->qp_num, 4937 attr->dest_qp_num) : 4938 0); 4939 4940 hr_reg_clear(qpc_mask, QPC_UDPSPN); 4941 4942 hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index); 4943 4944 hr_reg_clear(qpc_mask, QPC_GMV_IDX); 4945 4946 hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit); 4947 hr_reg_clear(qpc_mask, QPC_HOPLIMIT); 4948 4949 ret = fill_cong_field(ibqp, attr, context, qpc_mask); 4950 if (ret) 4951 return ret; 4952 4953 hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh)); 4954 hr_reg_clear(qpc_mask, QPC_TC); 4955 4956 hr_reg_write(context, QPC_FL, grh->flow_label); 4957 hr_reg_clear(qpc_mask, QPC_FL); 4958 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); 4959 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); 4960 4961 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); 4962 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { 4963 ibdev_err(ibdev, 4964 "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", 4965 hr_qp->sl, MAX_SERVICE_LEVEL); 4966 return -EINVAL; 4967 } 4968 4969 hr_reg_write(context, QPC_SL, hr_qp->sl); 4970 hr_reg_clear(qpc_mask, QPC_SL); 4971 4972 return 0; 4973 } 4974 4975 static bool check_qp_state(enum ib_qp_state cur_state, 4976 enum ib_qp_state new_state) 4977 { 4978 static const bool sm[][IB_QPS_ERR + 1] = { 4979 [IB_QPS_RESET] = { [IB_QPS_RESET] = true, 4980 [IB_QPS_INIT] = true }, 4981 [IB_QPS_INIT] = { [IB_QPS_RESET] = true, 4982 [IB_QPS_INIT] = true, 4983 [IB_QPS_RTR] = true, 4984 [IB_QPS_ERR] = true }, 4985 [IB_QPS_RTR] = { [IB_QPS_RESET] = true, 4986 [IB_QPS_RTS] = true, 4987 [IB_QPS_ERR] = true }, 4988 [IB_QPS_RTS] = { [IB_QPS_RESET] = true, 4989 [IB_QPS_RTS] = true, 4990 [IB_QPS_ERR] = true }, 4991 [IB_QPS_SQD] = {}, 4992 [IB_QPS_SQE] = {}, 4993 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, 4994 [IB_QPS_ERR] = true } 4995 }; 4996 4997 return sm[cur_state][new_state]; 4998 } 4999 5000 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, 5001 const struct ib_qp_attr *attr, 5002 int attr_mask, 5003 enum ib_qp_state cur_state, 5004 enum ib_qp_state new_state, 5005 struct hns_roce_v2_qp_context *context, 5006 struct hns_roce_v2_qp_context *qpc_mask) 5007 { 5008 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5009 int ret = 0; 5010 5011 if (!check_qp_state(cur_state, new_state)) { 5012 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n"); 5013 return -EINVAL; 5014 } 5015 5016 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 5017 memset(qpc_mask, 0, hr_dev->caps.qpc_sz); 5018 modify_qp_reset_to_init(ibqp, attr, attr_mask, context, 5019 qpc_mask); 5020 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { 5021 modify_qp_init_to_init(ibqp, attr, attr_mask, context, 5022 qpc_mask); 5023 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 5024 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context, 5025 qpc_mask); 5026 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { 5027 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context, 5028 qpc_mask); 5029 } 5030 5031 return ret; 5032 } 5033 5034 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) 5035 { 5036 #define QP_ACK_TIMEOUT_MAX_HIP08 20 5037 #define QP_ACK_TIMEOUT_OFFSET 10 5038 #define QP_ACK_TIMEOUT_MAX 31 5039 5040 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 5041 if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) { 5042 ibdev_warn(&hr_dev->ib_dev, 5043 "Local ACK timeout shall be 0 to 20.\n"); 5044 return false; 5045 } 5046 *timeout += QP_ACK_TIMEOUT_OFFSET; 5047 } else if 
(hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { 5048 if (*timeout > QP_ACK_TIMEOUT_MAX) { 5049 ibdev_warn(&hr_dev->ib_dev, 5050 "Local ACK timeout shall be 0 to 31.\n"); 5051 return false; 5052 } 5053 } 5054 5055 return true; 5056 } 5057 5058 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, 5059 const struct ib_qp_attr *attr, 5060 int attr_mask, 5061 struct hns_roce_v2_qp_context *context, 5062 struct hns_roce_v2_qp_context *qpc_mask) 5063 { 5064 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5065 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5066 int ret = 0; 5067 u8 timeout; 5068 5069 if (attr_mask & IB_QP_AV) { 5070 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context, 5071 qpc_mask); 5072 if (ret) 5073 return ret; 5074 } 5075 5076 if (attr_mask & IB_QP_TIMEOUT) { 5077 timeout = attr->timeout; 5078 if (check_qp_timeout_cfg_range(hr_dev, &timeout)) { 5079 hr_reg_write(context, QPC_AT, timeout); 5080 hr_reg_clear(qpc_mask, QPC_AT); 5081 } 5082 } 5083 5084 if (attr_mask & IB_QP_RETRY_CNT) { 5085 hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt); 5086 hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT); 5087 5088 hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt); 5089 hr_reg_clear(qpc_mask, QPC_RETRY_CNT); 5090 } 5091 5092 if (attr_mask & IB_QP_RNR_RETRY) { 5093 hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry); 5094 hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT); 5095 5096 hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry); 5097 hr_reg_clear(qpc_mask, QPC_RNR_CNT); 5098 } 5099 5100 if (attr_mask & IB_QP_SQ_PSN) { 5101 hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn); 5102 hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN); 5103 5104 hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn); 5105 hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN); 5106 5107 hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn); 5108 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L); 5109 5110 hr_reg_write(context, QPC_RETRY_MSG_PSN_H, 5111 attr->sq_psn >> RETRY_MSG_PSN_SHIFT); 5112 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H); 5113 5114 hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn); 5115 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN); 5116 5117 hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn); 5118 hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN); 5119 } 5120 5121 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && 5122 attr->max_dest_rd_atomic) { 5123 hr_reg_write(context, QPC_RR_MAX, 5124 fls(attr->max_dest_rd_atomic - 1)); 5125 hr_reg_clear(qpc_mask, QPC_RR_MAX); 5126 } 5127 5128 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { 5129 hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1)); 5130 hr_reg_clear(qpc_mask, QPC_SR_MAX); 5131 } 5132 5133 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) 5134 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask); 5135 5136 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 5137 hr_reg_write(context, QPC_MIN_RNR_TIME, 5138 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ? 
5139 HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer); 5140 hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME); 5141 } 5142 5143 if (attr_mask & IB_QP_RQ_PSN) { 5144 hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn); 5145 hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN); 5146 5147 hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1); 5148 hr_reg_clear(qpc_mask, QPC_RAQ_PSN); 5149 } 5150 5151 if (attr_mask & IB_QP_QKEY) { 5152 context->qkey_xrcd = cpu_to_le32(attr->qkey); 5153 qpc_mask->qkey_xrcd = 0; 5154 hr_qp->qkey = attr->qkey; 5155 } 5156 5157 return ret; 5158 } 5159 5160 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp, 5161 const struct ib_qp_attr *attr, 5162 int attr_mask) 5163 { 5164 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5165 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5166 5167 if (attr_mask & IB_QP_ACCESS_FLAGS) 5168 hr_qp->atomic_rd_en = attr->qp_access_flags; 5169 5170 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 5171 hr_qp->resp_depth = attr->max_dest_rd_atomic; 5172 if (attr_mask & IB_QP_PORT) { 5173 hr_qp->port = attr->port_num - 1; 5174 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; 5175 } 5176 } 5177 5178 static void clear_qp(struct hns_roce_qp *hr_qp) 5179 { 5180 struct ib_qp *ibqp = &hr_qp->ibqp; 5181 5182 if (ibqp->send_cq) 5183 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), 5184 hr_qp->qpn, NULL); 5185 5186 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq) 5187 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), 5188 hr_qp->qpn, ibqp->srq ? 5189 to_hr_srq(ibqp->srq) : NULL); 5190 5191 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 5192 *hr_qp->rdb.db_record = 0; 5193 5194 hr_qp->rq.head = 0; 5195 hr_qp->rq.tail = 0; 5196 hr_qp->sq.head = 0; 5197 hr_qp->sq.tail = 0; 5198 hr_qp->next_sge = 0; 5199 } 5200 5201 static void v2_set_flushed_fields(struct ib_qp *ibqp, 5202 struct hns_roce_v2_qp_context *context, 5203 struct hns_roce_v2_qp_context *qpc_mask) 5204 { 5205 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5206 unsigned long sq_flag = 0; 5207 unsigned long rq_flag = 0; 5208 5209 if (ibqp->qp_type == IB_QPT_XRC_TGT) 5210 return; 5211 5212 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag); 5213 hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head); 5214 hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX); 5215 hr_qp->state = IB_QPS_ERR; 5216 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag); 5217 5218 if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */ 5219 return; 5220 5221 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag); 5222 hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head); 5223 hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX); 5224 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag); 5225 } 5226 5227 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, 5228 const struct ib_qp_attr *attr, 5229 int attr_mask, enum ib_qp_state cur_state, 5230 enum ib_qp_state new_state) 5231 { 5232 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5233 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5234 struct hns_roce_v2_qp_context ctx[2]; 5235 struct hns_roce_v2_qp_context *context = ctx; 5236 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1; 5237 struct ib_device *ibdev = &hr_dev->ib_dev; 5238 int ret; 5239 5240 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 5241 return -EOPNOTSUPP; 5242 5243 /* 5244 * In the v2 engine, software passes the context and the context mask to 5245 * the hardware when modifying a QP. To modify some fields in the context, 5246 * software should clear all bits of the relevant fields in the context 5247 * mask at the same time; the mask bits of fields to be preserved stay 0x1.
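* A sketch of the resulting mailbox layout (each half is qpc_sz bytes): * ctx[0] below carries the new context values and ctx[1] the mask; * hns_roce_v2_qp_modify() then copies both, back to back, into the mailbox * buffer before issuing HNS_ROCE_CMD_MODIFY_QPC.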
5248 */ 5249 memset(context, 0, hr_dev->caps.qpc_sz); 5250 memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz); 5251 5252 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state, 5253 new_state, context, qpc_mask); 5254 if (ret) 5255 goto out; 5256 5257 /* When the QP state becomes ERR, the SQ and RQ WQEs should be flushed */ 5258 if (new_state == IB_QPS_ERR) 5259 v2_set_flushed_fields(ibqp, context, qpc_mask); 5260 5261 /* Configure the optional fields */ 5262 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context, 5263 qpc_mask); 5264 if (ret) 5265 goto out; 5266 5267 hr_reg_write_bool(context, QPC_INV_CREDIT, 5268 to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC || 5269 ibqp->srq); 5270 hr_reg_clear(qpc_mask, QPC_INV_CREDIT); 5271 5272 /* Every state migration must update the QP state field */ 5273 hr_reg_write(context, QPC_QP_ST, new_state); 5274 hr_reg_clear(qpc_mask, QPC_QP_ST); 5275 5276 /* SW passes the context to HW */ 5277 ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp); 5278 if (ret) { 5279 ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret); 5280 goto out; 5281 } 5282 5283 hr_qp->state = new_state; 5284 5285 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask); 5286 5287 if (new_state == IB_QPS_RESET && !ibqp->uobject) 5288 clear_qp(hr_qp); 5289 5290 out: 5291 return ret; 5292 } 5293 5294 static int to_ib_qp_st(enum hns_roce_v2_qp_state state) 5295 { 5296 static const enum ib_qp_state map[] = { 5297 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET, 5298 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT, 5299 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR, 5300 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS, 5301 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD, 5302 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE, 5303 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR, 5304 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD 5305 }; 5306 5307 return (state < ARRAY_SIZE(map)) ?
map[state] : -1; 5308 } 5309 5310 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, 5311 struct hns_roce_qp *hr_qp, 5312 struct hns_roce_v2_qp_context *hr_context) 5313 { 5314 struct hns_roce_cmd_mailbox *mailbox; 5315 int ret; 5316 5317 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 5318 if (IS_ERR(mailbox)) 5319 return PTR_ERR(mailbox); 5320 5321 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC, 5322 hr_qp->qpn); 5323 if (ret) 5324 goto out; 5325 5326 memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz); 5327 5328 out: 5329 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 5330 return ret; 5331 } 5332 5333 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 5334 int qp_attr_mask, 5335 struct ib_qp_init_attr *qp_init_attr) 5336 { 5337 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5338 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5339 struct hns_roce_v2_qp_context context = {}; 5340 struct ib_device *ibdev = &hr_dev->ib_dev; 5341 int tmp_qp_state; 5342 int state; 5343 int ret; 5344 5345 memset(qp_attr, 0, sizeof(*qp_attr)); 5346 memset(qp_init_attr, 0, sizeof(*qp_init_attr)); 5347 5348 mutex_lock(&hr_qp->mutex); 5349 5350 if (hr_qp->state == IB_QPS_RESET) { 5351 qp_attr->qp_state = IB_QPS_RESET; 5352 ret = 0; 5353 goto done; 5354 } 5355 5356 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context); 5357 if (ret) { 5358 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret); 5359 ret = -EINVAL; 5360 goto out; 5361 } 5362 5363 state = hr_reg_read(&context, QPC_QP_ST); 5364 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); 5365 if (tmp_qp_state == -1) { 5366 ibdev_err(ibdev, "Illegal ib_qp_state\n"); 5367 ret = -EINVAL; 5368 goto out; 5369 } 5370 hr_qp->state = (u8)tmp_qp_state; 5371 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; 5372 qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU); 5373 qp_attr->path_mig_state = IB_MIG_ARMED; 5374 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 5375 if (hr_qp->ibqp.qp_type == IB_QPT_UD) 5376 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd); 5377 5378 qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN); 5379 qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN); 5380 qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN); 5381 qp_attr->qp_access_flags = 5382 ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) | 5383 ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) | 5384 ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S); 5385 5386 if (hr_qp->ibqp.qp_type == IB_QPT_RC || 5387 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || 5388 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) { 5389 struct ib_global_route *grh = 5390 rdma_ah_retrieve_grh(&qp_attr->ah_attr); 5391 5392 rdma_ah_set_sl(&qp_attr->ah_attr, 5393 hr_reg_read(&context, QPC_SL)); 5394 grh->flow_label = hr_reg_read(&context, QPC_FL); 5395 grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX); 5396 grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT); 5397 grh->traffic_class = hr_reg_read(&context, QPC_TC); 5398 5399 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw)); 5400 } 5401 5402 qp_attr->port_num = hr_qp->port + 1; 5403 qp_attr->sq_draining = 0; 5404 qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX); 5405 qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX); 5406 5407 qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME); 5408 qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT); 5409 qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT); 5410 
qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT); 5411 5412 done: 5413 qp_attr->cur_qp_state = qp_attr->qp_state; 5414 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; 5415 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; 5416 qp_attr->cap.max_inline_data = hr_qp->max_inline_data; 5417 5418 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; 5419 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; 5420 5421 qp_init_attr->qp_context = ibqp->qp_context; 5422 qp_init_attr->qp_type = ibqp->qp_type; 5423 qp_init_attr->recv_cq = ibqp->recv_cq; 5424 qp_init_attr->send_cq = ibqp->send_cq; 5425 qp_init_attr->srq = ibqp->srq; 5426 qp_init_attr->cap = qp_attr->cap; 5427 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits; 5428 5429 out: 5430 mutex_unlock(&hr_qp->mutex); 5431 return ret; 5432 } 5433 5434 static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp) 5435 { 5436 return ((hr_qp->ibqp.qp_type == IB_QPT_RC || 5437 hr_qp->ibqp.qp_type == IB_QPT_UD || 5438 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || 5439 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) && 5440 hr_qp->state != IB_QPS_RESET); 5441 } 5442 5443 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, 5444 struct hns_roce_qp *hr_qp, 5445 struct ib_udata *udata) 5446 { 5447 struct ib_device *ibdev = &hr_dev->ib_dev; 5448 struct hns_roce_cq *send_cq, *recv_cq; 5449 unsigned long flags; 5450 int ret = 0; 5451 5452 if (modify_qp_is_ok(hr_qp)) { 5453 /* Modify qp to reset before destroying qp */ 5454 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, 5455 hr_qp->state, IB_QPS_RESET); 5456 if (ret) 5457 ibdev_err(ibdev, 5458 "failed to modify QP to RST, ret = %d.\n", 5459 ret); 5460 } 5461 5462 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; 5463 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; 5464 5465 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); 5466 hns_roce_lock_cqs(send_cq, recv_cq); 5467 5468 if (!udata) { 5469 if (recv_cq) 5470 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, 5471 (hr_qp->ibqp.srq ? 
5472 to_hr_srq(hr_qp->ibqp.srq) : 5473 NULL)); 5474 5475 if (send_cq && send_cq != recv_cq) 5476 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL); 5477 } 5478 5479 hns_roce_qp_remove(hr_dev, hr_qp); 5480 5481 hns_roce_unlock_cqs(send_cq, recv_cq); 5482 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); 5483 5484 return ret; 5485 } 5486 5487 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 5488 { 5489 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 5490 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 5491 int ret; 5492 5493 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); 5494 if (ret) 5495 ibdev_err(&hr_dev->ib_dev, 5496 "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n", 5497 hr_qp->qpn, ret); 5498 5499 hns_roce_qp_destroy(hr_dev, hr_qp, udata); 5500 5501 return 0; 5502 } 5503 5504 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, 5505 struct hns_roce_qp *hr_qp) 5506 { 5507 struct ib_device *ibdev = &hr_dev->ib_dev; 5508 struct hns_roce_sccc_clr_done *resp; 5509 struct hns_roce_sccc_clr *clr; 5510 struct hns_roce_cmq_desc desc; 5511 int ret, i; 5512 5513 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) 5514 return 0; 5515 5516 mutex_lock(&hr_dev->qp_table.scc_mutex); 5517 5518 /* set scc ctx clear done flag */ 5519 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false); 5520 ret = hns_roce_cmq_send(hr_dev, &desc, 1); 5521 if (ret) { 5522 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret); 5523 goto out; 5524 } 5525 5526 /* clear scc context */ 5527 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false); 5528 clr = (struct hns_roce_sccc_clr *)desc.data; 5529 clr->qpn = cpu_to_le32(hr_qp->qpn); 5530 ret = hns_roce_cmq_send(hr_dev, &desc, 1); 5531 if (ret) { 5532 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret); 5533 goto out; 5534 } 5535 5536 /* query scc context clear is done or not */ 5537 resp = (struct hns_roce_sccc_clr_done *)desc.data; 5538 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) { 5539 hns_roce_cmq_setup_basic_desc(&desc, 5540 HNS_ROCE_OPC_QUERY_SCCC, true); 5541 ret = hns_roce_cmq_send(hr_dev, &desc, 1); 5542 if (ret) { 5543 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n", 5544 ret); 5545 goto out; 5546 } 5547 5548 if (resp->clr_done) 5549 goto out; 5550 5551 msleep(20); 5552 } 5553 5554 ibdev_err(ibdev, "Query SCC clr done flag overtime.\n"); 5555 ret = -ETIMEDOUT; 5556 5557 out: 5558 mutex_unlock(&hr_dev->qp_table.scc_mutex); 5559 return ret; 5560 } 5561 5562 #define DMA_IDX_SHIFT 3 5563 #define DMA_WQE_SHIFT 3 5564 5565 static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq, 5566 struct hns_roce_srq_context *ctx) 5567 { 5568 struct hns_roce_idx_que *idx_que = &srq->idx_que; 5569 struct ib_device *ibdev = srq->ibsrq.device; 5570 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); 5571 u64 mtts_idx[MTT_MIN_COUNT] = {}; 5572 dma_addr_t dma_handle_idx = 0; 5573 int ret; 5574 5575 /* Get physical address of idx que buf */ 5576 ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx, 5577 ARRAY_SIZE(mtts_idx), &dma_handle_idx); 5578 if (ret < 1) { 5579 ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n", 5580 ret); 5581 return -ENOBUFS; 5582 } 5583 5584 hr_reg_write(ctx, SRQC_IDX_HOP_NUM, 5585 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt)); 5586 5587 hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT); 5588 hr_reg_write(ctx, SRQC_IDX_BT_BA_H, 5589 upper_32_bits(dma_handle_idx >> 
DMA_IDX_SHIFT)); 5590 5591 hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ, 5592 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift)); 5593 hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ, 5594 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift)); 5595 5596 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L, 5597 to_hr_hw_page_addr(mtts_idx[0])); 5598 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H, 5599 upper_32_bits(to_hr_hw_page_addr(mtts_idx[0]))); 5600 5601 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L, 5602 to_hr_hw_page_addr(mtts_idx[1])); 5603 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H, 5604 upper_32_bits(to_hr_hw_page_addr(mtts_idx[1]))); 5605 5606 return 0; 5607 } 5608 5609 static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) 5610 { 5611 struct ib_device *ibdev = srq->ibsrq.device; 5612 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); 5613 struct hns_roce_srq_context *ctx = mb_buf; 5614 u64 mtts_wqe[MTT_MIN_COUNT] = {}; 5615 dma_addr_t dma_handle_wqe = 0; 5616 int ret; 5617 5618 memset(ctx, 0, sizeof(*ctx)); 5619 5620 /* Get the physical address of srq buf */ 5621 ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe, 5622 ARRAY_SIZE(mtts_wqe), &dma_handle_wqe); 5623 if (ret < 1) { 5624 ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n", 5625 ret); 5626 return -ENOBUFS; 5627 } 5628 5629 hr_reg_write(ctx, SRQC_SRQ_ST, 1); 5630 hr_reg_write_bool(ctx, SRQC_SRQ_TYPE, 5631 srq->ibsrq.srq_type == IB_SRQT_XRC); 5632 hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn); 5633 hr_reg_write(ctx, SRQC_SRQN, srq->srqn); 5634 hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn); 5635 hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn); 5636 hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt)); 5637 hr_reg_write(ctx, SRQC_RQWS, 5638 srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1)); 5639 5640 hr_reg_write(ctx, SRQC_WQE_HOP_NUM, 5641 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num, 5642 srq->wqe_cnt)); 5643 5644 hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT); 5645 hr_reg_write(ctx, SRQC_WQE_BT_BA_H, 5646 upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT)); 5647 5648 hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ, 5649 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift)); 5650 hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ, 5651 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift)); 5652 5653 return hns_roce_v2_write_srqc_index_queue(srq, ctx); 5654 } 5655 5656 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, 5657 struct ib_srq_attr *srq_attr, 5658 enum ib_srq_attr_mask srq_attr_mask, 5659 struct ib_udata *udata) 5660 { 5661 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); 5662 struct hns_roce_srq *srq = to_hr_srq(ibsrq); 5663 struct hns_roce_srq_context *srq_context; 5664 struct hns_roce_srq_context *srqc_mask; 5665 struct hns_roce_cmd_mailbox *mailbox; 5666 int ret; 5667 5668 /* Resizing SRQs is not supported yet */ 5669 if (srq_attr_mask & IB_SRQ_MAX_WR) 5670 return -EINVAL; 5671 5672 if (srq_attr_mask & IB_SRQ_LIMIT) { 5673 if (srq_attr->srq_limit > srq->wqe_cnt) 5674 return -EINVAL; 5675 5676 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 5677 if (IS_ERR(mailbox)) 5678 return PTR_ERR(mailbox); 5679 5680 srq_context = mailbox->buf; 5681 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1; 5682 5683 memset(srqc_mask, 0xff, sizeof(*srqc_mask)); 5684 5685 hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit); 5686 hr_reg_clear(srqc_mask, SRQC_LIMIT_WL); 5687 5688 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, 5689 HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn); 5690 hns_roce_free_cmd_mailbox(hr_dev, 
mailbox);
5691 if (ret) {
5692 ibdev_err(&hr_dev->ib_dev,
5693 "failed to process cmd of modifying SRQ, ret = %d.\n",
5694 ret);
5695 return ret;
5696 }
5697 }
5698
5699 return 0;
5700 }
5701
5702 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5703 {
5704 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5705 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5706 struct hns_roce_srq_context *srq_context;
5707 struct hns_roce_cmd_mailbox *mailbox;
5708 int ret;
5709
5710 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5711 if (IS_ERR(mailbox))
5712 return PTR_ERR(mailbox);
5713
5714 srq_context = mailbox->buf;
5715 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
5716 HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
5717 if (ret) {
5718 ibdev_err(&hr_dev->ib_dev,
5719 "failed to process cmd of querying SRQ, ret = %d.\n",
5720 ret);
5721 goto out;
5722 }
5723
5724 attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
5725 attr->max_wr = srq->wqe_cnt;
5726 attr->max_sge = srq->max_gs - srq->rsv_sge;
5727
5728 out:
5729 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5730 return ret;
5731 }
5732
5733 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5734 {
5735 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5736 struct hns_roce_v2_cq_context *cq_context;
5737 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5738 struct hns_roce_v2_cq_context *cqc_mask;
5739 struct hns_roce_cmd_mailbox *mailbox;
5740 int ret;
5741
5742 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5743 if (IS_ERR(mailbox))
5744 return PTR_ERR(mailbox);
5745
5746 cq_context = mailbox->buf;
5747 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5748
5749 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5750
5751 hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
5752 hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
5753
5754 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5755 if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
5756 dev_info(hr_dev->dev,
5757 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
5758 cq_period);
5759 cq_period = HNS_ROCE_MAX_CQ_PERIOD;
5760 }
5761 cq_period *= HNS_ROCE_CLOCK_ADJUST;
5762 }
5763 hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
5764 hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
5765
5766 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
5767 HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
5768 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5769 if (ret)
5770 ibdev_err(&hr_dev->ib_dev,
5771 "failed to process cmd when modifying CQ, ret = %d.\n",
5772 ret);
5773
5774 return ret;
5775 }
5776
5777 static void hns_roce_irq_work_handle(struct work_struct *work)
5778 {
5779 struct hns_roce_work *irq_work =
5780 container_of(work, struct hns_roce_work, work);
5781 struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5782
5783 switch (irq_work->event_type) {
5784 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5785 ibdev_info(ibdev, "Path migration succeeded.\n");
5786 break;
5787 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5788 ibdev_warn(ibdev, "Path migration failed.\n");
5789 break;
5790 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5791 break;
5792 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5793 ibdev_warn(ibdev, "Send queue drained.\n");
5794 break;
5795 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5796 ibdev_err(ibdev, "Local work queue 0x%x catastrophic error, sub_event type is: %d\n",
5797 irq_work->queue_num, irq_work->sub_type);
5798 break;
5799 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5800 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
5801 irq_work->queue_num);
5802 break;
5803 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5804 ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
5805 irq_work->queue_num, irq_work->sub_type);
5806 break;
5807 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5808 ibdev_warn(ibdev, "SRQ limit reached.\n");
5809 break;
5810 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5811 ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5812 break;
5813 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5814 ibdev_err(ibdev, "SRQ catastrophic error.\n");
5815 break;
5816 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5817 ibdev_err(ibdev, "CQ 0x%x access error.\n", irq_work->queue_num);
5818 break;
5819 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5820 ibdev_warn(ibdev, "CQ 0x%x overflow.\n", irq_work->queue_num);
5821 break;
5822 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5823 ibdev_warn(ibdev, "DB overflow.\n");
5824 break;
5825 case HNS_ROCE_EVENT_TYPE_FLR:
5826 ibdev_warn(ibdev, "Function level reset.\n");
5827 break;
5828 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5829 ibdev_err(ibdev, "XRC domain violation error.\n");
5830 break;
5831 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5832 ibdev_err(ibdev, "Invalid XRCETH error.\n");
5833 break;
5834 default:
5835 break;
5836 }
5837
5838 kfree(irq_work);
5839 }
5840
5841 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5842 struct hns_roce_eq *eq, u32 queue_num)
5843 {
5844 struct hns_roce_work *irq_work;
5845
5846 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5847 if (!irq_work)
5848 return;
5849
5850 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5851 irq_work->hr_dev = hr_dev;
5852 irq_work->event_type = eq->event_type;
5853 irq_work->sub_type = eq->sub_type;
5854 irq_work->queue_num = queue_num;
5855 queue_work(hr_dev->irq_workq, &(irq_work->work));
5856 }
5857
5858 static void update_eq_db(struct hns_roce_eq *eq)
5859 {
5860 struct hns_roce_dev *hr_dev = eq->hr_dev;
5861 struct hns_roce_v2_db eq_db = {};
5862
5863 if (eq->type_flag == HNS_ROCE_AEQ) {
5864 hr_reg_write(&eq_db, EQ_DB_CMD,
5865 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5866 HNS_ROCE_EQ_DB_CMD_AEQ :
5867 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5868 } else {
5869 hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);
5870
5871 hr_reg_write(&eq_db, EQ_DB_CMD,
5872 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5873 HNS_ROCE_EQ_DB_CMD_CEQ :
5874 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5875 }
5876
5877 hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);
5878
5879 hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
5880 }
5881
5882 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5883 {
5884 struct hns_roce_aeqe *aeqe;
5885
5886 aeqe = hns_roce_buf_offset(eq->mtr.kmem,
5887 (eq->cons_index & (eq->entries - 1)) *
5888 eq->eqe_size);
5889
5890 return (hr_reg_read(aeqe, AEQE_OWNER) ^
5891 !!(eq->cons_index & eq->entries)) ?
aeqe : NULL; 5892 } 5893 5894 static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, 5895 struct hns_roce_eq *eq) 5896 { 5897 struct device *dev = hr_dev->dev; 5898 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq); 5899 irqreturn_t aeqe_found = IRQ_NONE; 5900 int event_type; 5901 u32 queue_num; 5902 int sub_type; 5903 5904 while (aeqe) { 5905 /* Make sure we read AEQ entry after we have checked the 5906 * ownership bit 5907 */ 5908 dma_rmb(); 5909 5910 event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE); 5911 sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE); 5912 queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM); 5913 5914 switch (event_type) { 5915 case HNS_ROCE_EVENT_TYPE_PATH_MIG: 5916 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: 5917 case HNS_ROCE_EVENT_TYPE_COMM_EST: 5918 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: 5919 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: 5920 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: 5921 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: 5922 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: 5923 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION: 5924 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH: 5925 hns_roce_qp_event(hr_dev, queue_num, event_type); 5926 break; 5927 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: 5928 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: 5929 hns_roce_srq_event(hr_dev, queue_num, event_type); 5930 break; 5931 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: 5932 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: 5933 hns_roce_cq_event(hr_dev, queue_num, event_type); 5934 break; 5935 case HNS_ROCE_EVENT_TYPE_MB: 5936 hns_roce_cmd_event(hr_dev, 5937 le16_to_cpu(aeqe->event.cmd.token), 5938 aeqe->event.cmd.status, 5939 le64_to_cpu(aeqe->event.cmd.out_param)); 5940 break; 5941 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: 5942 case HNS_ROCE_EVENT_TYPE_FLR: 5943 break; 5944 default: 5945 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n", 5946 event_type, eq->eqn, eq->cons_index); 5947 break; 5948 } 5949 5950 eq->event_type = event_type; 5951 eq->sub_type = sub_type; 5952 ++eq->cons_index; 5953 aeqe_found = IRQ_HANDLED; 5954 5955 hns_roce_v2_init_irq_work(hr_dev, eq, queue_num); 5956 5957 aeqe = next_aeqe_sw_v2(eq); 5958 } 5959 5960 update_eq_db(eq); 5961 5962 return IRQ_RETVAL(aeqe_found); 5963 } 5964 5965 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) 5966 { 5967 struct hns_roce_ceqe *ceqe; 5968 5969 ceqe = hns_roce_buf_offset(eq->mtr.kmem, 5970 (eq->cons_index & (eq->entries - 1)) * 5971 eq->eqe_size); 5972 5973 return (hr_reg_read(ceqe, CEQE_OWNER) ^ 5974 !!(eq->cons_index & eq->entries)) ? 
ceqe : NULL;
5975 }
5976
5977 static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5978 struct hns_roce_eq *eq)
5979 {
5980 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5981 irqreturn_t ceqe_found = IRQ_NONE;
5982 u32 cqn;
5983
5984 while (ceqe) {
5985 /* Make sure we read CEQ entry after we have checked the
5986 * ownership bit
5987 */
5988 dma_rmb();
5989
5990 cqn = hr_reg_read(ceqe, CEQE_CQN);
5991
5992 hns_roce_cq_completion(hr_dev, cqn);
5993
5994 ++eq->cons_index;
5995 ceqe_found = IRQ_HANDLED;
5996
5997 ceqe = next_ceqe_sw_v2(eq);
5998 }
5999
6000 update_eq_db(eq);
6001
6002 return IRQ_RETVAL(ceqe_found);
6003 }
6004
6005 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
6006 {
6007 struct hns_roce_eq *eq = eq_ptr;
6008 struct hns_roce_dev *hr_dev = eq->hr_dev;
6009 irqreturn_t int_work;
6010
6011 if (eq->type_flag == HNS_ROCE_CEQ)
6012 /* Completion event interrupt */
6013 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
6014 else
6015 /* Asynchronous event interrupt */
6016 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
6017
6018 return IRQ_RETVAL(int_work);
6019 }
6020
6021 static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
6022 u32 int_st)
6023 {
6024 struct pci_dev *pdev = hr_dev->pci_dev;
6025 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
6026 const struct hnae3_ae_ops *ops = ae_dev->ops;
6027 irqreturn_t int_work = IRQ_NONE;
6028 u32 int_en;
6029
6030 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
6031
6032 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
6033 dev_err(hr_dev->dev, "AEQ overflow!\n");
6034
6035 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
6036 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
6037
6038 /* Set reset level for reset_event() */
6039 if (ops->set_default_reset_request)
6040 ops->set_default_reset_request(ae_dev,
6041 HNAE3_FUNC_RESET);
6042 if (ops->reset_event)
6043 ops->reset_event(pdev, NULL);
6044
6045 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
6046 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
6047
6048 int_work = IRQ_HANDLED;
6049 } else {
6050 dev_err(hr_dev->dev, "no basic abnormal irq found.\n");
6051 }
6052
6053 return IRQ_RETVAL(int_work);
6054 }
6055
6056 static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
6057 struct fmea_ram_ecc *ecc_info)
6058 {
6059 struct hns_roce_cmq_desc desc;
6060 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
6061 int ret;
6062
6063 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
6064 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
6065 if (ret)
6066 return ret;
6067
6068 ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
6069 ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
6070 ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);
6071
6072 return 0;
6073 }
6074
6075 static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
6076 {
6077 struct hns_roce_cmq_desc desc;
6078 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
6079 u32 addr_upper;
6080 u32 addr_low;
6081 int ret;
6082
6083 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
6084 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
6085
6086 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
6087 if (ret) {
6088 dev_err(hr_dev->dev,
6089 "failed to execute cmd to read gmv, ret = %d.\n", ret);
6090 return ret;
6091 }
6092
6093 addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
6094 addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);
6095
6096 hns_roce_cmq_setup_basic_desc(&desc,
HNS_ROCE_OPC_CFG_GMV_BT, false);
6097 hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
6098 hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
6099 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
6100
6101 return hns_roce_cmq_send(hr_dev, &desc, 1);
6102 }
6103
6104 static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
6105 {
6106 if (res_type == ECC_RESOURCE_QPC_TIMER ||
6107 res_type == ECC_RESOURCE_CQC_TIMER ||
6108 res_type == ECC_RESOURCE_SCCC)
6109 return le64_to_cpu(*data);
6110
6111 return le64_to_cpu(*data) << PAGE_SHIFT;
6112 }
6113
6114 static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
6115 u32 index)
6116 {
6117 u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
6118 u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
6119 struct hns_roce_cmd_mailbox *mailbox;
6120 u64 addr;
6121 int ret;
6122
6123 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6124 if (IS_ERR(mailbox))
6125 return PTR_ERR(mailbox);
6126
6127 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
6128 if (ret) {
6129 dev_err(hr_dev->dev,
6130 "failed to execute cmd to read fmea ram, ret = %d.\n",
6131 ret);
6132 goto out;
6133 }
6134
6135 addr = fmea_get_ram_res_addr(res_type, mailbox->buf);
6136
6137 ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
6138 if (ret)
6139 dev_err(hr_dev->dev,
6140 "failed to execute cmd to write fmea ram, ret = %d.\n",
6141 ret);
6142
6143 out:
6144 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6145 return ret;
6146 }
6147
6148 static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
6149 struct fmea_ram_ecc *ecc_info)
6150 {
6151 u32 res_type = ecc_info->res_type;
6152 u32 index = ecc_info->index;
6153 int ret;
6154
6155 BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);
6156
6157 if (res_type >= ECC_RESOURCE_COUNT) {
6158 dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
6159 res_type);
6160 return;
6161 }
6162
6163 if (res_type == ECC_RESOURCE_GMV)
6164 ret = fmea_recover_gmv(hr_dev, index);
6165 else
6166 ret = fmea_recover_others(hr_dev, res_type, index);
6167 if (ret)
6168 dev_err(hr_dev->dev,
6169 "failed to recover %s, index = %u, ret = %d.\n",
6170 fmea_ram_res[res_type].name, index, ret);
6171 }
6172
6173 static void fmea_ram_ecc_work(struct work_struct *ecc_work)
6174 {
6175 struct hns_roce_dev *hr_dev =
6176 container_of(ecc_work, struct hns_roce_dev, ecc_work);
6177 struct fmea_ram_ecc ecc_info = {};
6178
6179 if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
6180 dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
6181 return;
6182 }
6183
6184 if (!ecc_info.is_ecc_err) {
6185 dev_err(hr_dev->dev, "no fmea ram ecc error found.\n");
6186 return;
6187 }
6188
6189 fmea_ram_ecc_recover(hr_dev, &ecc_info);
6190 }
6191
6192 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
6193 {
6194 struct hns_roce_dev *hr_dev = dev_id;
6195 irqreturn_t int_work = IRQ_NONE;
6196 u32 int_st;
6197
6198 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
6199
6200 if (int_st) {
6201 int_work = abnormal_interrupt_basic(hr_dev, int_st);
6202 } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
6203 queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
6204 int_work = IRQ_HANDLED;
6205 } else {
6206 dev_err(hr_dev->dev, "no abnormal irq found.\n");
6207 }
6208
6209 return IRQ_RETVAL(int_work);
6210 }
6211
6212 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
6213 int eq_num, u32 enable_flag)
6214 {
6215 int i;
6216
6217 for (i = 0; i < eq_num; i++)
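/* Per-EQ interrupt enable registers appear to be laid out at a fixed
 * stride of EQ_REG_OFFSET from ROCEE_VF_EVENT_INT_EN_REG, so one write
 * per vector masks or unmasks that EQ's interrupt.
 */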
6218 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
6219 i * EQ_REG_OFFSET, enable_flag);
6220
6221 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
6222 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
6223 }
6224
6225 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
6226 {
6227 struct device *dev = hr_dev->dev;
6228 int ret;
6229 u8 cmd;
6230
6231 if (eqn < hr_dev->caps.num_comp_vectors)
6232 cmd = HNS_ROCE_CMD_DESTROY_CEQC;
6233 else
6234 cmd = HNS_ROCE_CMD_DESTROY_AEQC;
6235
6236 ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
6237 if (ret)
6238 dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
6239 }
6240
6241 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
6242 {
6243 hns_roce_mtr_destroy(hr_dev, &eq->mtr);
6244 }
6245
6246 static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
6247 {
6248 eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
6249 eq->cons_index = 0;
6250 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
6251 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
6252 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
6253 eq->shift = ilog2((unsigned int)eq->entries);
6254 }
6255
6256 static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
6257 void *mb_buf)
6258 {
6259 u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
6260 struct hns_roce_eq_context *eqc;
6261 u64 bt_ba = 0;
6262 int count;
6263
6264 eqc = mb_buf;
6265 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
6266
6267 init_eq_config(hr_dev, eq);
6268
6269 /* if not multi-hop, the eqe buffer uses only one trunk */
6270 count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
6271 &bt_ba);
6272 if (count < 1) {
6273 dev_err(hr_dev->dev, "failed to find EQE mtr.\n");
6274 return -ENOBUFS;
6275 }
6276
6277 hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
6278 hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
6279 hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
6280 hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
6281 hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
6282 hr_reg_write(eqc, EQC_EQN, eq->eqn);
6283 hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
6284 hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
6285 to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
6286 hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
6287 to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
6288 hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
6289 hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
6290
6291 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
6292 if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
6293 dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
6294 eq->eq_period);
6295 eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
6296 }
6297 eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
6298 }
6299
6300 hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
6301 hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
6302 hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
6303 hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
6304 hr_reg_write(eqc, EQC_SHIFT, eq->shift);
6305 hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
6306 hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
6307 hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
6308 hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
6309 hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
6310 hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
6311 hr_reg_write(eqc,
EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44); 6312 hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE); 6313 6314 return 0; 6315 } 6316 6317 static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) 6318 { 6319 struct hns_roce_buf_attr buf_attr = {}; 6320 int err; 6321 6322 if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0) 6323 eq->hop_num = 0; 6324 else 6325 eq->hop_num = hr_dev->caps.eqe_hop_num; 6326 6327 buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT; 6328 buf_attr.region[0].size = eq->entries * eq->eqe_size; 6329 buf_attr.region[0].hopnum = eq->hop_num; 6330 buf_attr.region_count = 1; 6331 6332 err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr, 6333 hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL, 6334 0); 6335 if (err) 6336 dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err); 6337 6338 return err; 6339 } 6340 6341 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, 6342 struct hns_roce_eq *eq, u8 eq_cmd) 6343 { 6344 struct hns_roce_cmd_mailbox *mailbox; 6345 int ret; 6346 6347 /* Allocate mailbox memory */ 6348 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 6349 if (IS_ERR(mailbox)) 6350 return PTR_ERR(mailbox); 6351 6352 ret = alloc_eq_buf(hr_dev, eq); 6353 if (ret) 6354 goto free_cmd_mbox; 6355 6356 ret = config_eqc(hr_dev, eq, mailbox->buf); 6357 if (ret) 6358 goto err_cmd_mbox; 6359 6360 ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn); 6361 if (ret) { 6362 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n"); 6363 goto err_cmd_mbox; 6364 } 6365 6366 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 6367 6368 return 0; 6369 6370 err_cmd_mbox: 6371 free_eq_buf(hr_dev, eq); 6372 6373 free_cmd_mbox: 6374 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 6375 6376 return ret; 6377 } 6378 6379 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, 6380 int comp_num, int aeq_num, int other_num) 6381 { 6382 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; 6383 int i, j; 6384 int ret; 6385 6386 for (i = 0; i < irq_num; i++) { 6387 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, 6388 GFP_KERNEL); 6389 if (!hr_dev->irq_names[i]) { 6390 ret = -ENOMEM; 6391 goto err_kzalloc_failed; 6392 } 6393 } 6394 6395 /* irq contains: abnormal + AEQ + CEQ */ 6396 for (j = 0; j < other_num; j++) 6397 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, 6398 "hns-abn-%d", j); 6399 6400 for (j = other_num; j < (other_num + aeq_num); j++) 6401 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, 6402 "hns-aeq-%d", j - other_num); 6403 6404 for (j = (other_num + aeq_num); j < irq_num; j++) 6405 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, 6406 "hns-ceq-%d", j - other_num - aeq_num); 6407 6408 for (j = 0; j < irq_num; j++) { 6409 if (j < other_num) 6410 ret = request_irq(hr_dev->irq[j], 6411 hns_roce_v2_msix_interrupt_abn, 6412 0, hr_dev->irq_names[j], hr_dev); 6413 6414 else if (j < (other_num + comp_num)) 6415 ret = request_irq(eq_table->eq[j - other_num].irq, 6416 hns_roce_v2_msix_interrupt_eq, 6417 0, hr_dev->irq_names[j + aeq_num], 6418 &eq_table->eq[j - other_num]); 6419 else 6420 ret = request_irq(eq_table->eq[j - other_num].irq, 6421 hns_roce_v2_msix_interrupt_eq, 6422 0, hr_dev->irq_names[j - comp_num], 6423 &eq_table->eq[j - other_num]); 6424 if (ret) { 6425 dev_err(hr_dev->dev, "Request irq error!\n"); 6426 goto err_request_failed; 6427 } 6428 } 6429 6430 return 0; 6431 6432 err_request_failed: 6433 for (j -= 1; j >= 0; j--) 6434 if (j < other_num) 6435 
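/* Unwind in reverse order: the first other_num irqs were requested
 * with hr_dev as the cookie, while the EQ irqs were requested with
 * their eq pointer, so each free_irq() below must pass the matching
 * dev_id.
 */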
free_irq(hr_dev->irq[j], hr_dev); 6436 else 6437 free_irq(eq_table->eq[j - other_num].irq, 6438 &eq_table->eq[j - other_num]); 6439 6440 err_kzalloc_failed: 6441 for (i -= 1; i >= 0; i--) 6442 kfree(hr_dev->irq_names[i]); 6443 6444 return ret; 6445 } 6446 6447 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev) 6448 { 6449 int irq_num; 6450 int eq_num; 6451 int i; 6452 6453 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; 6454 irq_num = eq_num + hr_dev->caps.num_other_vectors; 6455 6456 for (i = 0; i < hr_dev->caps.num_other_vectors; i++) 6457 free_irq(hr_dev->irq[i], hr_dev); 6458 6459 for (i = 0; i < eq_num; i++) 6460 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]); 6461 6462 for (i = 0; i < irq_num; i++) 6463 kfree(hr_dev->irq_names[i]); 6464 } 6465 6466 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) 6467 { 6468 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; 6469 struct device *dev = hr_dev->dev; 6470 struct hns_roce_eq *eq; 6471 int other_num; 6472 int comp_num; 6473 int aeq_num; 6474 int irq_num; 6475 int eq_num; 6476 u8 eq_cmd; 6477 int ret; 6478 int i; 6479 6480 other_num = hr_dev->caps.num_other_vectors; 6481 comp_num = hr_dev->caps.num_comp_vectors; 6482 aeq_num = hr_dev->caps.num_aeq_vectors; 6483 6484 eq_num = comp_num + aeq_num; 6485 irq_num = eq_num + other_num; 6486 6487 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); 6488 if (!eq_table->eq) 6489 return -ENOMEM; 6490 6491 /* create eq */ 6492 for (i = 0; i < eq_num; i++) { 6493 eq = &eq_table->eq[i]; 6494 eq->hr_dev = hr_dev; 6495 eq->eqn = i; 6496 if (i < comp_num) { 6497 /* CEQ */ 6498 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC; 6499 eq->type_flag = HNS_ROCE_CEQ; 6500 eq->entries = hr_dev->caps.ceqe_depth; 6501 eq->eqe_size = hr_dev->caps.ceqe_size; 6502 eq->irq = hr_dev->irq[i + other_num + aeq_num]; 6503 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; 6504 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; 6505 } else { 6506 /* AEQ */ 6507 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC; 6508 eq->type_flag = HNS_ROCE_AEQ; 6509 eq->entries = hr_dev->caps.aeqe_depth; 6510 eq->eqe_size = hr_dev->caps.aeqe_size; 6511 eq->irq = hr_dev->irq[i - comp_num + other_num]; 6512 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; 6513 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; 6514 } 6515 6516 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd); 6517 if (ret) { 6518 dev_err(dev, "failed to create eq.\n"); 6519 goto err_create_eq_fail; 6520 } 6521 } 6522 6523 INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work); 6524 6525 hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0); 6526 if (!hr_dev->irq_workq) { 6527 dev_err(dev, "failed to create irq workqueue.\n"); 6528 ret = -ENOMEM; 6529 goto err_create_eq_fail; 6530 } 6531 6532 ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num, 6533 other_num); 6534 if (ret) { 6535 dev_err(dev, "failed to request irq.\n"); 6536 goto err_request_irq_fail; 6537 } 6538 6539 /* enable irq */ 6540 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE); 6541 6542 return 0; 6543 6544 err_request_irq_fail: 6545 destroy_workqueue(hr_dev->irq_workq); 6546 6547 err_create_eq_fail: 6548 for (i -= 1; i >= 0; i--) 6549 free_eq_buf(hr_dev, &eq_table->eq[i]); 6550 kfree(eq_table->eq); 6551 6552 return ret; 6553 } 6554 6555 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) 6556 { 6557 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; 6558 int eq_num; 6559 int i; 6560 6561 eq_num = 
hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; 6562 6563 /* Disable irq */ 6564 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE); 6565 6566 __hns_roce_free_irq(hr_dev); 6567 destroy_workqueue(hr_dev->irq_workq); 6568 6569 for (i = 0; i < eq_num; i++) { 6570 hns_roce_v2_destroy_eqc(hr_dev, i); 6571 6572 free_eq_buf(hr_dev, &eq_table->eq[i]); 6573 } 6574 6575 kfree(eq_table->eq); 6576 } 6577 6578 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = { 6579 .query_cqc_info = hns_roce_v2_query_cqc_info, 6580 }; 6581 6582 static const struct ib_device_ops hns_roce_v2_dev_ops = { 6583 .destroy_qp = hns_roce_v2_destroy_qp, 6584 .modify_cq = hns_roce_v2_modify_cq, 6585 .poll_cq = hns_roce_v2_poll_cq, 6586 .post_recv = hns_roce_v2_post_recv, 6587 .post_send = hns_roce_v2_post_send, 6588 .query_qp = hns_roce_v2_query_qp, 6589 .req_notify_cq = hns_roce_v2_req_notify_cq, 6590 }; 6591 6592 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = { 6593 .modify_srq = hns_roce_v2_modify_srq, 6594 .post_srq_recv = hns_roce_v2_post_srq_recv, 6595 .query_srq = hns_roce_v2_query_srq, 6596 }; 6597 6598 static const struct hns_roce_hw hns_roce_hw_v2 = { 6599 .cmq_init = hns_roce_v2_cmq_init, 6600 .cmq_exit = hns_roce_v2_cmq_exit, 6601 .hw_profile = hns_roce_v2_profile, 6602 .hw_init = hns_roce_v2_init, 6603 .hw_exit = hns_roce_v2_exit, 6604 .post_mbox = v2_post_mbox, 6605 .poll_mbox_done = v2_poll_mbox_done, 6606 .chk_mbox_avail = v2_chk_mbox_is_avail, 6607 .set_gid = hns_roce_v2_set_gid, 6608 .set_mac = hns_roce_v2_set_mac, 6609 .write_mtpt = hns_roce_v2_write_mtpt, 6610 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt, 6611 .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt, 6612 .mw_write_mtpt = hns_roce_v2_mw_write_mtpt, 6613 .write_cqc = hns_roce_v2_write_cqc, 6614 .set_hem = hns_roce_v2_set_hem, 6615 .clear_hem = hns_roce_v2_clear_hem, 6616 .modify_qp = hns_roce_v2_modify_qp, 6617 .dereg_mr = hns_roce_v2_dereg_mr, 6618 .qp_flow_control_init = hns_roce_v2_qp_flow_control_init, 6619 .init_eq = hns_roce_v2_init_eq_table, 6620 .cleanup_eq = hns_roce_v2_cleanup_eq_table, 6621 .write_srqc = hns_roce_v2_write_srqc, 6622 .hns_roce_dev_ops = &hns_roce_v2_dev_ops, 6623 .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops, 6624 }; 6625 6626 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = { 6627 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 6628 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 6629 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 6630 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 6631 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 6632 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0}, 6633 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 6634 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 6635 /* required last entry */ 6636 {0, } 6637 }; 6638 6639 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl); 6640 6641 static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, 6642 struct hnae3_handle *handle) 6643 { 6644 struct hns_roce_v2_priv *priv = hr_dev->priv; 6645 const struct pci_device_id *id; 6646 int i; 6647 6648 hr_dev->pci_dev = handle->pdev; 6649 id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev); 6650 hr_dev->is_vf = id->driver_data; 6651 hr_dev->dev = &handle->pdev->dev; 6652 hr_dev->hw = &hns_roce_hw_v2; 6653 hr_dev->dfx = &hns_roce_dfx_hw_v2; 6654 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; 6655 hr_dev->odb_offset = hr_dev->sdb_offset; 6656 6657 /* Get info from NIC driver. 
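 * The RoCE function reuses resources owned by the bound hnae3 handle:
 * the io/mem register bases, the netdev and the MSI-X vectors that are
 * picked up just below.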
*/ 6658 hr_dev->reg_base = handle->rinfo.roce_io_base; 6659 hr_dev->mem_base = handle->rinfo.roce_mem_base; 6660 hr_dev->caps.num_ports = 1; 6661 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev; 6662 hr_dev->iboe.phy_port[0] = 0; 6663 6664 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid, 6665 hr_dev->iboe.netdevs[0]->dev_addr); 6666 6667 for (i = 0; i < handle->rinfo.num_vectors; i++) 6668 hr_dev->irq[i] = pci_irq_vector(handle->pdev, 6669 i + handle->rinfo.base_vector); 6670 6671 /* cmd issue mode: 0 is poll, 1 is event */ 6672 hr_dev->cmd_mod = 1; 6673 hr_dev->loop_idc = 0; 6674 6675 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle); 6676 priv->handle = handle; 6677 } 6678 6679 static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) 6680 { 6681 struct hns_roce_dev *hr_dev; 6682 int ret; 6683 6684 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev); 6685 if (!hr_dev) 6686 return -ENOMEM; 6687 6688 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL); 6689 if (!hr_dev->priv) { 6690 ret = -ENOMEM; 6691 goto error_failed_kzalloc; 6692 } 6693 6694 hns_roce_hw_v2_get_cfg(hr_dev, handle); 6695 6696 ret = hns_roce_init(hr_dev); 6697 if (ret) { 6698 dev_err(hr_dev->dev, "RoCE Engine init failed!\n"); 6699 goto error_failed_cfg; 6700 } 6701 6702 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { 6703 ret = free_mr_init(hr_dev); 6704 if (ret) { 6705 dev_err(hr_dev->dev, "failed to init free mr!\n"); 6706 goto error_failed_roce_init; 6707 } 6708 } 6709 6710 handle->priv = hr_dev; 6711 6712 return 0; 6713 6714 error_failed_roce_init: 6715 hns_roce_exit(hr_dev); 6716 6717 error_failed_cfg: 6718 kfree(hr_dev->priv); 6719 6720 error_failed_kzalloc: 6721 ib_dealloc_device(&hr_dev->ib_dev); 6722 6723 return ret; 6724 } 6725 6726 static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, 6727 bool reset) 6728 { 6729 struct hns_roce_dev *hr_dev = handle->priv; 6730 6731 if (!hr_dev) 6732 return; 6733 6734 handle->priv = NULL; 6735 6736 hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; 6737 hns_roce_handle_device_err(hr_dev); 6738 6739 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) 6740 free_mr_exit(hr_dev); 6741 6742 hns_roce_exit(hr_dev); 6743 kfree(hr_dev->priv); 6744 ib_dealloc_device(&hr_dev->ib_dev); 6745 } 6746 6747 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) 6748 { 6749 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; 6750 const struct pci_device_id *id; 6751 struct device *dev = &handle->pdev->dev; 6752 int ret; 6753 6754 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT; 6755 6756 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) { 6757 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; 6758 goto reset_chk_err; 6759 } 6760 6761 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev); 6762 if (!id) 6763 return 0; 6764 6765 if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08) 6766 return 0; 6767 6768 ret = __hns_roce_hw_v2_init_instance(handle); 6769 if (ret) { 6770 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; 6771 dev_err(dev, "RoCE instance init failed! 
ret = %d\n", ret); 6772 if (ops->ae_dev_resetting(handle) || 6773 ops->get_hw_reset_stat(handle)) 6774 goto reset_chk_err; 6775 else 6776 return ret; 6777 } 6778 6779 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED; 6780 6781 return 0; 6782 6783 reset_chk_err: 6784 dev_err(dev, "Device is busy in resetting state.\n" 6785 "please retry later.\n"); 6786 6787 return -EBUSY; 6788 } 6789 6790 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, 6791 bool reset) 6792 { 6793 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) 6794 return; 6795 6796 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT; 6797 6798 __hns_roce_hw_v2_uninit_instance(handle, reset); 6799 6800 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; 6801 } 6802 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) 6803 { 6804 struct hns_roce_dev *hr_dev; 6805 6806 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) { 6807 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); 6808 return 0; 6809 } 6810 6811 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN; 6812 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); 6813 6814 hr_dev = handle->priv; 6815 if (!hr_dev) 6816 return 0; 6817 6818 hr_dev->active = false; 6819 hr_dev->dis_db = true; 6820 hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN; 6821 6822 return 0; 6823 } 6824 6825 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) 6826 { 6827 struct device *dev = &handle->pdev->dev; 6828 int ret; 6829 6830 if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN, 6831 &handle->rinfo.state)) { 6832 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; 6833 return 0; 6834 } 6835 6836 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT; 6837 6838 dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n"); 6839 ret = __hns_roce_hw_v2_init_instance(handle); 6840 if (ret) { 6841 /* when reset notify type is HNAE3_INIT_CLIENT In reset notify 6842 * callback function, RoCE Engine reinitialize. If RoCE reinit 6843 * failed, we should inform NIC driver. 
6844 */ 6845 handle->priv = NULL; 6846 dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret); 6847 } else { 6848 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; 6849 dev_info(dev, "Reset done, RoCE client reinit finished.\n"); 6850 } 6851 6852 return ret; 6853 } 6854 6855 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle) 6856 { 6857 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) 6858 return 0; 6859 6860 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; 6861 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); 6862 msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY); 6863 __hns_roce_hw_v2_uninit_instance(handle, false); 6864 6865 return 0; 6866 } 6867 6868 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle, 6869 enum hnae3_reset_notify_type type) 6870 { 6871 int ret = 0; 6872 6873 switch (type) { 6874 case HNAE3_DOWN_CLIENT: 6875 ret = hns_roce_hw_v2_reset_notify_down(handle); 6876 break; 6877 case HNAE3_INIT_CLIENT: 6878 ret = hns_roce_hw_v2_reset_notify_init(handle); 6879 break; 6880 case HNAE3_UNINIT_CLIENT: 6881 ret = hns_roce_hw_v2_reset_notify_uninit(handle); 6882 break; 6883 default: 6884 break; 6885 } 6886 6887 return ret; 6888 } 6889 6890 static const struct hnae3_client_ops hns_roce_hw_v2_ops = { 6891 .init_instance = hns_roce_hw_v2_init_instance, 6892 .uninit_instance = hns_roce_hw_v2_uninit_instance, 6893 .reset_notify = hns_roce_hw_v2_reset_notify, 6894 }; 6895 6896 static struct hnae3_client hns_roce_hw_v2_client = { 6897 .name = "hns_roce_hw_v2", 6898 .type = HNAE3_CLIENT_ROCE, 6899 .ops = &hns_roce_hw_v2_ops, 6900 }; 6901 6902 static int __init hns_roce_hw_v2_init(void) 6903 { 6904 return hnae3_register_client(&hns_roce_hw_v2_client); 6905 } 6906 6907 static void __exit hns_roce_hw_v2_exit(void) 6908 { 6909 hnae3_unregister_client(&hns_roce_hw_v2_client); 6910 } 6911 6912 module_init(hns_roce_hw_v2_init); 6913 module_exit(hns_roce_hw_v2_exit); 6914 6915 MODULE_LICENSE("Dual BSD/GPL"); 6916 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); 6917 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); 6918 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>"); 6919 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver"); 6920