1 /* 2 * Copyright (c) 2016 Hisilicon Limited. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/pci.h> 35 #include <linux/platform_device.h> 36 #include <rdma/ib_addr.h> 37 #include <rdma/ib_umem.h> 38 #include <rdma/uverbs_ioctl.h> 39 #include "hns_roce_common.h" 40 #include "hns_roce_device.h" 41 #include "hns_roce_hem.h" 42 43 static void flush_work_handle(struct work_struct *work) 44 { 45 struct hns_roce_work *flush_work = container_of(work, 46 struct hns_roce_work, work); 47 struct hns_roce_qp *hr_qp = container_of(flush_work, 48 struct hns_roce_qp, flush_work); 49 struct device *dev = flush_work->hr_dev->dev; 50 struct ib_qp_attr attr; 51 int attr_mask; 52 int ret; 53 54 attr_mask = IB_QP_STATE; 55 attr.qp_state = IB_QPS_ERR; 56 57 if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { 58 ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); 59 if (ret) 60 dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n", 61 ret); 62 } 63 64 /* 65 * make sure we signal QP destroy leg that flush QP was completed 66 * so that it can safely proceed ahead now and destroy QP 67 */ 68 if (atomic_dec_and_test(&hr_qp->refcount)) 69 complete(&hr_qp->free); 70 } 71 72 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 73 { 74 struct hns_roce_work *flush_work = &hr_qp->flush_work; 75 76 flush_work->hr_dev = hr_dev; 77 INIT_WORK(&flush_work->work, flush_work_handle); 78 atomic_inc(&hr_qp->refcount); 79 queue_work(hr_dev->irq_workq, &flush_work->work); 80 } 81 82 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) 83 { 84 struct device *dev = hr_dev->dev; 85 struct hns_roce_qp *qp; 86 87 xa_lock(&hr_dev->qp_table_xa); 88 qp = __hns_roce_qp_lookup(hr_dev, qpn); 89 if (qp) 90 atomic_inc(&qp->refcount); 91 xa_unlock(&hr_dev->qp_table_xa); 92 93 if (!qp) { 94 dev_warn(dev, "Async event for bogus QP %08x\n", qpn); 95 return; 96 } 97 98 if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && 99 (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || 100 event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || 101 event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) { 102 qp->state = IB_QPS_ERR; 103 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) 104 init_flush_work(hr_dev, qp); 105 } 106 107 qp->event(qp, (enum hns_roce_event)event_type); 108 109 if (atomic_dec_and_test(&qp->refcount)) 110 complete(&qp->free); 111 } 112 113 static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, 114 enum hns_roce_event type) 115 { 116 struct ib_qp *ibqp = &hr_qp->ibqp; 117 struct ib_event event; 118 119 if (ibqp->event_handler) { 120 event.device = ibqp->device; 121 event.element.qp = ibqp; 122 switch (type) { 123 case HNS_ROCE_EVENT_TYPE_PATH_MIG: 124 event.event = IB_EVENT_PATH_MIG; 125 break; 126 case HNS_ROCE_EVENT_TYPE_COMM_EST: 127 event.event = IB_EVENT_COMM_EST; 128 break; 129 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: 130 event.event = IB_EVENT_SQ_DRAINED; 131 break; 132 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: 133 event.event = IB_EVENT_QP_LAST_WQE_REACHED; 134 break; 135 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: 136 event.event = IB_EVENT_QP_FATAL; 137 break; 138 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: 139 event.event = IB_EVENT_PATH_MIG_ERR; 140 break; 141 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: 142 event.event = IB_EVENT_QP_REQ_ERR; 143 break; 144 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: 145 event.event = IB_EVENT_QP_ACCESS_ERR; 146 break; 147 default: 148 dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", 149 type, hr_qp->qpn); 150 return; 151 } 152 ibqp->event_handler(&event, ibqp->qp_context); 153 } 154 } 155 156 static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank) 157 { 158 u32 least_load = bank[0].inuse; 159 u8 bankid = 0; 160 u32 bankcnt; 161 u8 i; 162 163 for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) { 164 bankcnt = bank[i].inuse; 165 if (bankcnt < least_load) { 166 least_load = bankcnt; 167 bankid = i; 168 } 169 } 170 171 return bankid; 172 } 173 174 static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid, 175 unsigned long *qpn) 176 { 177 int id; 178 179 id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL); 180 if (id < 0) { 181 id = ida_alloc_range(&bank->ida, bank->min, bank->max, 182 GFP_KERNEL); 183 if (id < 0) 184 return id; 185 } 186 187 /* the QPN should keep increasing until the max value is reached. */ 188 bank->next = (id + 1) > bank->max ? bank->min : id + 1; 189 190 /* the lower 3 bits is bankid */ 191 *qpn = (id << 3) | bankid; 192 193 return 0; 194 } 195 static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 196 { 197 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; 198 unsigned long num = 0; 199 u8 bankid; 200 int ret; 201 202 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { 203 /* when hw version is v1, the sqpn is allocated */ 204 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) 205 num = HNS_ROCE_MAX_PORTS + 206 hr_dev->iboe.phy_port[hr_qp->port]; 207 else 208 num = 1; 209 210 hr_qp->doorbell_qpn = 1; 211 } else { 212 mutex_lock(&qp_table->bank_mutex); 213 bankid = get_least_load_bankid_for_qp(qp_table->bank); 214 215 ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid, 216 &num); 217 if (ret) { 218 ibdev_err(&hr_dev->ib_dev, 219 "failed to alloc QPN, ret = %d\n", ret); 220 mutex_unlock(&qp_table->bank_mutex); 221 return ret; 222 } 223 224 qp_table->bank[bankid].inuse++; 225 mutex_unlock(&qp_table->bank_mutex); 226 227 hr_qp->doorbell_qpn = (u32)num; 228 } 229 230 hr_qp->qpn = num; 231 232 return 0; 233 } 234 235 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) 236 { 237 switch (state) { 238 case IB_QPS_RESET: 239 return HNS_ROCE_QP_STATE_RST; 240 case IB_QPS_INIT: 241 return HNS_ROCE_QP_STATE_INIT; 242 case IB_QPS_RTR: 243 return HNS_ROCE_QP_STATE_RTR; 244 case IB_QPS_RTS: 245 return HNS_ROCE_QP_STATE_RTS; 246 case IB_QPS_SQD: 247 return HNS_ROCE_QP_STATE_SQD; 248 case IB_QPS_ERR: 249 return HNS_ROCE_QP_STATE_ERR; 250 default: 251 return HNS_ROCE_QP_NUM_STATE; 252 } 253 } 254 255 static void add_qp_to_list(struct hns_roce_dev *hr_dev, 256 struct hns_roce_qp *hr_qp, 257 struct ib_cq *send_cq, struct ib_cq *recv_cq) 258 { 259 struct hns_roce_cq *hr_send_cq, *hr_recv_cq; 260 unsigned long flags; 261 262 hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; 263 hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL; 264 265 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); 266 hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); 267 268 list_add_tail(&hr_qp->node, &hr_dev->qp_list); 269 if (hr_send_cq) 270 list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); 271 if (hr_recv_cq) 272 list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); 273 274 hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); 275 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); 276 } 277 278 static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, 279 struct hns_roce_qp *hr_qp, 280 struct ib_qp_init_attr *init_attr) 281 { 282 struct xarray *xa = &hr_dev->qp_table_xa; 283 int ret; 284 285 if (!hr_qp->qpn) 286 return -EINVAL; 287 288 ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); 289 if (ret) 290 dev_err(hr_dev->dev, "Failed to xa store for QPC\n"); 291 else 292 /* add QP to device's QP list for softwc */ 293 add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, 294 init_attr->recv_cq); 295 296 return ret; 297 } 298 299 static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 300 { 301 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; 302 struct device *dev = hr_dev->dev; 303 int ret; 304 305 if (!hr_qp->qpn) 306 return -EINVAL; 307 308 /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ 309 if (hr_qp->ibqp.qp_type == IB_QPT_GSI && 310 hr_dev->hw_rev == HNS_ROCE_HW_VER1) 311 return 0; 312 313 /* Alloc memory for QPC */ 314 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); 315 if (ret) { 316 dev_err(dev, "Failed to get QPC table\n"); 317 goto err_out; 318 } 319 320 /* Alloc memory for IRRL */ 321 ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); 322 if (ret) { 323 dev_err(dev, "Failed to get IRRL table\n"); 324 goto err_put_qp; 325 } 326 327 if (hr_dev->caps.trrl_entry_sz) { 328 /* Alloc memory for TRRL */ 329 ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, 330 hr_qp->qpn); 331 if (ret) { 332 dev_err(dev, "Failed to get TRRL table\n"); 333 goto err_put_irrl; 334 } 335 } 336 337 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { 338 /* Alloc memory for SCC CTX */ 339 ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, 340 hr_qp->qpn); 341 if (ret) { 342 dev_err(dev, "Failed to get SCC CTX table\n"); 343 goto err_put_trrl; 344 } 345 } 346 347 return 0; 348 349 err_put_trrl: 350 if (hr_dev->caps.trrl_entry_sz) 351 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); 352 353 err_put_irrl: 354 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); 355 356 err_put_qp: 357 hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); 358 359 err_out: 360 return ret; 361 } 362 363 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 364 { 365 struct xarray *xa = &hr_dev->qp_table_xa; 366 unsigned long flags; 367 368 list_del(&hr_qp->node); 369 list_del(&hr_qp->sq_node); 370 list_del(&hr_qp->rq_node); 371 372 xa_lock_irqsave(xa, flags); 373 __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1)); 374 xa_unlock_irqrestore(xa, flags); 375 } 376 377 static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 378 { 379 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; 380 381 /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ 382 if (hr_qp->ibqp.qp_type == IB_QPT_GSI && 383 hr_dev->hw_rev == HNS_ROCE_HW_VER1) 384 return; 385 386 if (hr_dev->caps.trrl_entry_sz) 387 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); 388 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); 389 } 390 391 static inline u8 get_qp_bankid(unsigned long qpn) 392 { 393 /* The lower 3 bits of QPN are used to hash to different banks */ 394 return (u8)(qpn & GENMASK(2, 0)); 395 } 396 397 static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 398 { 399 u8 bankid; 400 401 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) 402 return; 403 404 if (hr_qp->qpn < hr_dev->caps.reserved_qps) 405 return; 406 407 bankid = get_qp_bankid(hr_qp->qpn); 408 409 ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3); 410 411 mutex_lock(&hr_dev->qp_table.bank_mutex); 412 hr_dev->qp_table.bank[bankid].inuse--; 413 mutex_unlock(&hr_dev->qp_table.bank_mutex); 414 } 415 416 static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp, 417 bool user) 418 { 419 u32 max_sge = dev->caps.max_rq_sg; 420 421 if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) 422 return max_sge; 423 424 /* Reserve SGEs only for HIP08 in kernel; The userspace driver will 425 * calculate number of max_sge with reserved SGEs when allocating wqe 426 * buf, so there is no need to do this again in kernel. But the number 427 * may exceed the capacity of SGEs recorded in the firmware, so the 428 * kernel driver should just adapt the value accordingly. 429 */ 430 if (user) 431 max_sge = roundup_pow_of_two(max_sge + 1); 432 else 433 hr_qp->rq.rsv_sge = 1; 434 435 return max_sge; 436 } 437 438 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, 439 struct hns_roce_qp *hr_qp, int has_rq, bool user) 440 { 441 u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user); 442 u32 cnt; 443 444 /* If srq exist, set zero for relative number of rq */ 445 if (!has_rq) { 446 hr_qp->rq.wqe_cnt = 0; 447 hr_qp->rq.max_gs = 0; 448 hr_qp->rq_inl_buf.wqe_cnt = 0; 449 cap->max_recv_wr = 0; 450 cap->max_recv_sge = 0; 451 452 return 0; 453 } 454 455 /* Check the validity of QP support capacity */ 456 if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || 457 cap->max_recv_sge > max_sge) { 458 ibdev_err(&hr_dev->ib_dev, 459 "RQ config error, depth = %u, sge = %u\n", 460 cap->max_recv_wr, cap->max_recv_sge); 461 return -EINVAL; 462 } 463 464 cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); 465 if (cnt > hr_dev->caps.max_wqes) { 466 ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", 467 cap->max_recv_wr); 468 return -EINVAL; 469 } 470 471 hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + 472 hr_qp->rq.rsv_sge); 473 474 if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) 475 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); 476 else 477 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * 478 hr_qp->rq.max_gs); 479 480 hr_qp->rq.wqe_cnt = cnt; 481 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) 482 hr_qp->rq_inl_buf.wqe_cnt = cnt; 483 else 484 hr_qp->rq_inl_buf.wqe_cnt = 0; 485 486 cap->max_recv_wr = cnt; 487 cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; 488 489 return 0; 490 } 491 492 static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp) 493 { 494 /* GSI/UD QP only has extended sge */ 495 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) 496 return qp->sq.max_gs; 497 498 if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) 499 return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE; 500 501 return 0; 502 } 503 504 static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, 505 struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap) 506 { 507 u32 total_sge_cnt; 508 u32 wqe_sge_cnt; 509 510 hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; 511 512 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { 513 hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE; 514 return; 515 } 516 517 hr_qp->sq.max_gs = max(1U, cap->max_send_sge); 518 519 wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp); 520 521 /* If the number of extended sge is not zero, they MUST use the 522 * space of HNS_HW_PAGE_SIZE at least. 523 */ 524 if (wqe_sge_cnt) { 525 total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt); 526 hr_qp->sge.sge_cnt = max(total_sge_cnt, 527 (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE); 528 } 529 } 530 531 static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, 532 struct ib_qp_cap *cap, 533 struct hns_roce_ib_create_qp *ucmd) 534 { 535 u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); 536 u8 max_sq_stride = ilog2(roundup_sq_stride); 537 538 /* Sanity check SQ size before proceeding */ 539 if (ucmd->log_sq_stride > max_sq_stride || 540 ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { 541 ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n"); 542 return -EINVAL; 543 } 544 545 if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { 546 ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n", 547 cap->max_send_sge); 548 return -EINVAL; 549 } 550 551 return 0; 552 } 553 554 static int set_user_sq_size(struct hns_roce_dev *hr_dev, 555 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, 556 struct hns_roce_ib_create_qp *ucmd) 557 { 558 struct ib_device *ibdev = &hr_dev->ib_dev; 559 u32 cnt = 0; 560 int ret; 561 562 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || 563 cnt > hr_dev->caps.max_wqes) 564 return -EINVAL; 565 566 ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); 567 if (ret) { 568 ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n", 569 ret); 570 return ret; 571 } 572 573 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); 574 575 hr_qp->sq.wqe_shift = ucmd->log_sq_stride; 576 hr_qp->sq.wqe_cnt = cnt; 577 578 return 0; 579 } 580 581 static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev, 582 struct hns_roce_qp *hr_qp, 583 struct hns_roce_buf_attr *buf_attr) 584 { 585 int buf_size; 586 int idx = 0; 587 588 hr_qp->buff_size = 0; 589 590 /* SQ WQE */ 591 hr_qp->sq.offset = 0; 592 buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt, 593 hr_qp->sq.wqe_shift); 594 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { 595 buf_attr->region[idx].size = buf_size; 596 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; 597 idx++; 598 hr_qp->buff_size += buf_size; 599 } 600 601 /* extend SGE WQE in SQ */ 602 hr_qp->sge.offset = hr_qp->buff_size; 603 buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt, 604 hr_qp->sge.sge_shift); 605 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { 606 buf_attr->region[idx].size = buf_size; 607 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; 608 idx++; 609 hr_qp->buff_size += buf_size; 610 } 611 612 /* RQ WQE */ 613 hr_qp->rq.offset = hr_qp->buff_size; 614 buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt, 615 hr_qp->rq.wqe_shift); 616 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { 617 buf_attr->region[idx].size = buf_size; 618 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; 619 idx++; 620 hr_qp->buff_size += buf_size; 621 } 622 623 if (hr_qp->buff_size < 1) 624 return -EINVAL; 625 626 buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; 627 buf_attr->region_count = idx; 628 629 return 0; 630 } 631 632 static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, 633 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) 634 { 635 struct ib_device *ibdev = &hr_dev->ib_dev; 636 u32 cnt; 637 638 if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || 639 cap->max_send_sge > hr_dev->caps.max_sq_sg) { 640 ibdev_err(ibdev, 641 "failed to check SQ WR or SGE num, ret = %d.\n", 642 -EINVAL); 643 return -EINVAL; 644 } 645 646 cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); 647 if (cnt > hr_dev->caps.max_wqes) { 648 ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n", 649 cnt); 650 return -EINVAL; 651 } 652 653 hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); 654 hr_qp->sq.wqe_cnt = cnt; 655 656 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); 657 658 /* sync the parameters of kernel QP to user's configuration */ 659 cap->max_send_wr = cnt; 660 cap->max_send_sge = hr_qp->sq.max_gs; 661 662 return 0; 663 } 664 665 static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) 666 { 667 if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) 668 return 0; 669 670 return 1; 671 } 672 673 static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) 674 { 675 if (attr->qp_type == IB_QPT_XRC_INI || 676 attr->qp_type == IB_QPT_XRC_TGT || attr->srq || 677 !attr->cap.max_recv_wr) 678 return 0; 679 680 return 1; 681 } 682 683 static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp, 684 struct ib_qp_init_attr *init_attr) 685 { 686 u32 max_recv_sge = init_attr->cap.max_recv_sge; 687 u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt; 688 struct hns_roce_rinl_wqe *wqe_list; 689 int i; 690 691 /* allocate recv inline buf */ 692 wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe), 693 GFP_KERNEL); 694 695 if (!wqe_list) 696 goto err; 697 698 /* Allocate a continuous buffer for all inline sge we need */ 699 wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge * 700 sizeof(struct hns_roce_rinl_sge)), 701 GFP_KERNEL); 702 if (!wqe_list[0].sg_list) 703 goto err_wqe_list; 704 705 /* Assign buffers of sg_list to each inline wqe */ 706 for (i = 1; i < wqe_cnt; i++) 707 wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge]; 708 709 hr_qp->rq_inl_buf.wqe_list = wqe_list; 710 711 return 0; 712 713 err_wqe_list: 714 kfree(wqe_list); 715 716 err: 717 return -ENOMEM; 718 } 719 720 static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) 721 { 722 if (hr_qp->rq_inl_buf.wqe_list) 723 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); 724 kfree(hr_qp->rq_inl_buf.wqe_list); 725 } 726 727 static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, 728 struct ib_qp_init_attr *init_attr, 729 struct ib_udata *udata, unsigned long addr) 730 { 731 struct ib_device *ibdev = &hr_dev->ib_dev; 732 struct hns_roce_buf_attr buf_attr = {}; 733 int ret; 734 735 if (!udata && hr_qp->rq_inl_buf.wqe_cnt) { 736 ret = alloc_rq_inline_buf(hr_qp, init_attr); 737 if (ret) { 738 ibdev_err(ibdev, 739 "failed to alloc inline buf, ret = %d.\n", 740 ret); 741 return ret; 742 } 743 } else { 744 hr_qp->rq_inl_buf.wqe_list = NULL; 745 } 746 747 ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr); 748 if (ret) { 749 ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret); 750 goto err_inline; 751 } 752 ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr, 753 HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, 754 udata, addr); 755 if (ret) { 756 ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); 757 goto err_inline; 758 } 759 760 return 0; 761 err_inline: 762 free_rq_inline_buf(hr_qp); 763 764 return ret; 765 } 766 767 static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) 768 { 769 hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); 770 free_rq_inline_buf(hr_qp); 771 } 772 773 static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev, 774 struct ib_qp_init_attr *init_attr, 775 struct ib_udata *udata, 776 struct hns_roce_ib_create_qp_resp *resp, 777 struct hns_roce_ib_create_qp *ucmd) 778 { 779 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && 780 udata->outlen >= offsetofend(typeof(*resp), cap_flags) && 781 hns_roce_qp_has_sq(init_attr) && 782 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); 783 } 784 785 static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev, 786 struct ib_qp_init_attr *init_attr, 787 struct ib_udata *udata, 788 struct hns_roce_ib_create_qp_resp *resp) 789 { 790 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && 791 udata->outlen >= offsetofend(typeof(*resp), cap_flags) && 792 hns_roce_qp_has_rq(init_attr)); 793 } 794 795 static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, 796 struct ib_qp_init_attr *init_attr) 797 { 798 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && 799 hns_roce_qp_has_rq(init_attr)); 800 } 801 802 static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, 803 struct ib_qp_init_attr *init_attr, 804 struct ib_udata *udata, 805 struct hns_roce_ib_create_qp *ucmd, 806 struct hns_roce_ib_create_qp_resp *resp) 807 { 808 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( 809 udata, struct hns_roce_ucontext, ibucontext); 810 struct ib_device *ibdev = &hr_dev->ib_dev; 811 int ret; 812 813 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE) 814 hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; 815 816 if (udata) { 817 if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { 818 ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr, 819 &hr_qp->sdb); 820 if (ret) { 821 ibdev_err(ibdev, 822 "failed to map user SQ doorbell, ret = %d.\n", 823 ret); 824 goto err_out; 825 } 826 hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; 827 resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; 828 } 829 830 if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { 831 ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr, 832 &hr_qp->rdb); 833 if (ret) { 834 ibdev_err(ibdev, 835 "failed to map user RQ doorbell, ret = %d.\n", 836 ret); 837 goto err_sdb; 838 } 839 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; 840 resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; 841 } 842 } else { 843 /* QP doorbell register address */ 844 hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + 845 DB_REG_OFFSET * hr_dev->priv_uar.index; 846 hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset + 847 DB_REG_OFFSET * hr_dev->priv_uar.index; 848 849 if (kernel_qp_has_rdb(hr_dev, init_attr)) { 850 ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); 851 if (ret) { 852 ibdev_err(ibdev, 853 "failed to alloc kernel RQ doorbell, ret = %d.\n", 854 ret); 855 goto err_out; 856 } 857 *hr_qp->rdb.db_record = 0; 858 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; 859 } 860 } 861 862 return 0; 863 err_sdb: 864 if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) 865 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); 866 err_out: 867 return ret; 868 } 869 870 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, 871 struct ib_udata *udata) 872 { 873 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( 874 udata, struct hns_roce_ucontext, ibucontext); 875 876 if (udata) { 877 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 878 hns_roce_db_unmap_user(uctx, &hr_qp->rdb); 879 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) 880 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); 881 } else { 882 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 883 hns_roce_free_db(hr_dev, &hr_qp->rdb); 884 } 885 } 886 887 static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, 888 struct hns_roce_qp *hr_qp) 889 { 890 struct ib_device *ibdev = &hr_dev->ib_dev; 891 u64 *sq_wrid = NULL; 892 u64 *rq_wrid = NULL; 893 int ret; 894 895 sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); 896 if (ZERO_OR_NULL_PTR(sq_wrid)) { 897 ibdev_err(ibdev, "failed to alloc SQ wrid.\n"); 898 return -ENOMEM; 899 } 900 901 if (hr_qp->rq.wqe_cnt) { 902 rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); 903 if (ZERO_OR_NULL_PTR(rq_wrid)) { 904 ibdev_err(ibdev, "failed to alloc RQ wrid.\n"); 905 ret = -ENOMEM; 906 goto err_sq; 907 } 908 } 909 910 hr_qp->sq.wrid = sq_wrid; 911 hr_qp->rq.wrid = rq_wrid; 912 return 0; 913 err_sq: 914 kfree(sq_wrid); 915 916 return ret; 917 } 918 919 static void free_kernel_wrid(struct hns_roce_qp *hr_qp) 920 { 921 kfree(hr_qp->rq.wrid); 922 kfree(hr_qp->sq.wrid); 923 } 924 925 static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, 926 struct ib_qp_init_attr *init_attr, 927 struct ib_udata *udata, 928 struct hns_roce_ib_create_qp *ucmd) 929 { 930 struct ib_device *ibdev = &hr_dev->ib_dev; 931 int ret; 932 933 hr_qp->ibqp.qp_type = init_attr->qp_type; 934 935 if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline) 936 init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline; 937 938 hr_qp->max_inline_data = init_attr->cap.max_inline_data; 939 940 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 941 hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; 942 else 943 hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; 944 945 ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, 946 hns_roce_qp_has_rq(init_attr), !!udata); 947 if (ret) { 948 ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n", 949 ret); 950 return ret; 951 } 952 953 if (udata) { 954 ret = ib_copy_from_udata(ucmd, udata, 955 min(udata->inlen, sizeof(*ucmd))); 956 if (ret) { 957 ibdev_err(ibdev, 958 "failed to copy QP ucmd, ret = %d\n", ret); 959 return ret; 960 } 961 962 ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); 963 if (ret) 964 ibdev_err(ibdev, 965 "failed to set user SQ size, ret = %d.\n", 966 ret); 967 } else { 968 ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); 969 if (ret) 970 ibdev_err(ibdev, 971 "failed to set kernel SQ size, ret = %d.\n", 972 ret); 973 } 974 975 return ret; 976 } 977 978 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, 979 struct ib_pd *ib_pd, 980 struct ib_qp_init_attr *init_attr, 981 struct ib_udata *udata, 982 struct hns_roce_qp *hr_qp) 983 { 984 struct hns_roce_ib_create_qp_resp resp = {}; 985 struct ib_device *ibdev = &hr_dev->ib_dev; 986 struct hns_roce_ib_create_qp ucmd; 987 int ret; 988 989 mutex_init(&hr_qp->mutex); 990 spin_lock_init(&hr_qp->sq.lock); 991 spin_lock_init(&hr_qp->rq.lock); 992 993 hr_qp->state = IB_QPS_RESET; 994 hr_qp->flush_flag = 0; 995 996 if (init_attr->create_flags) 997 return -EOPNOTSUPP; 998 999 ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); 1000 if (ret) { 1001 ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret); 1002 return ret; 1003 } 1004 1005 if (!udata) { 1006 ret = alloc_kernel_wrid(hr_dev, hr_qp); 1007 if (ret) { 1008 ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n", 1009 ret); 1010 return ret; 1011 } 1012 } 1013 1014 ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp); 1015 if (ret) { 1016 ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n", 1017 ret); 1018 goto err_wrid; 1019 } 1020 1021 ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); 1022 if (ret) { 1023 ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret); 1024 goto err_db; 1025 } 1026 1027 ret = alloc_qpn(hr_dev, hr_qp); 1028 if (ret) { 1029 ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret); 1030 goto err_buf; 1031 } 1032 1033 ret = alloc_qpc(hr_dev, hr_qp); 1034 if (ret) { 1035 ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n", 1036 ret); 1037 goto err_qpn; 1038 } 1039 1040 ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); 1041 if (ret) { 1042 ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret); 1043 goto err_qpc; 1044 } 1045 1046 if (udata) { 1047 ret = ib_copy_to_udata(udata, &resp, 1048 min(udata->outlen, sizeof(resp))); 1049 if (ret) { 1050 ibdev_err(ibdev, "copy qp resp failed!\n"); 1051 goto err_store; 1052 } 1053 } 1054 1055 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { 1056 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); 1057 if (ret) 1058 goto err_store; 1059 } 1060 1061 hr_qp->ibqp.qp_num = hr_qp->qpn; 1062 hr_qp->event = hns_roce_ib_qp_event; 1063 atomic_set(&hr_qp->refcount, 1); 1064 init_completion(&hr_qp->free); 1065 1066 return 0; 1067 1068 err_store: 1069 hns_roce_qp_remove(hr_dev, hr_qp); 1070 err_qpc: 1071 free_qpc(hr_dev, hr_qp); 1072 err_qpn: 1073 free_qpn(hr_dev, hr_qp); 1074 err_buf: 1075 free_qp_buf(hr_dev, hr_qp); 1076 err_db: 1077 free_qp_db(hr_dev, hr_qp, udata); 1078 err_wrid: 1079 free_kernel_wrid(hr_qp); 1080 return ret; 1081 } 1082 1083 void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, 1084 struct ib_udata *udata) 1085 { 1086 if (atomic_dec_and_test(&hr_qp->refcount)) 1087 complete(&hr_qp->free); 1088 wait_for_completion(&hr_qp->free); 1089 1090 free_qpc(hr_dev, hr_qp); 1091 free_qpn(hr_dev, hr_qp); 1092 free_qp_buf(hr_dev, hr_qp); 1093 free_kernel_wrid(hr_qp); 1094 free_qp_db(hr_dev, hr_qp, udata); 1095 1096 kfree(hr_qp); 1097 } 1098 1099 static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, 1100 bool is_user) 1101 { 1102 switch (type) { 1103 case IB_QPT_UD: 1104 if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && 1105 is_user) 1106 goto out; 1107 fallthrough; 1108 case IB_QPT_RC: 1109 case IB_QPT_GSI: 1110 break; 1111 default: 1112 goto out; 1113 } 1114 1115 return 0; 1116 1117 out: 1118 ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type); 1119 1120 return -EOPNOTSUPP; 1121 } 1122 1123 struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, 1124 struct ib_qp_init_attr *init_attr, 1125 struct ib_udata *udata) 1126 { 1127 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); 1128 struct ib_device *ibdev = &hr_dev->ib_dev; 1129 struct hns_roce_qp *hr_qp; 1130 int ret; 1131 1132 ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); 1133 if (ret) 1134 return ERR_PTR(ret); 1135 1136 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); 1137 if (!hr_qp) 1138 return ERR_PTR(-ENOMEM); 1139 1140 if (init_attr->qp_type == IB_QPT_GSI) { 1141 hr_qp->port = init_attr->port_num - 1; 1142 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; 1143 } 1144 1145 ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); 1146 if (ret) { 1147 ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n", 1148 init_attr->qp_type, ret); 1149 1150 kfree(hr_qp); 1151 return ERR_PTR(ret); 1152 } 1153 1154 return &hr_qp->ibqp; 1155 } 1156 1157 int to_hr_qp_type(int qp_type) 1158 { 1159 int transport_type; 1160 1161 if (qp_type == IB_QPT_RC) 1162 transport_type = SERV_TYPE_RC; 1163 else if (qp_type == IB_QPT_UC) 1164 transport_type = SERV_TYPE_UC; 1165 else if (qp_type == IB_QPT_UD) 1166 transport_type = SERV_TYPE_UD; 1167 else if (qp_type == IB_QPT_GSI) 1168 transport_type = SERV_TYPE_UD; 1169 else 1170 transport_type = -1; 1171 1172 return transport_type; 1173 } 1174 1175 static int check_mtu_validate(struct hns_roce_dev *hr_dev, 1176 struct hns_roce_qp *hr_qp, 1177 struct ib_qp_attr *attr, int attr_mask) 1178 { 1179 enum ib_mtu active_mtu; 1180 int p; 1181 1182 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; 1183 active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); 1184 1185 if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && 1186 attr->path_mtu > hr_dev->caps.max_mtu) || 1187 attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { 1188 ibdev_err(&hr_dev->ib_dev, 1189 "attr path_mtu(%d)invalid while modify qp", 1190 attr->path_mtu); 1191 return -EINVAL; 1192 } 1193 1194 return 0; 1195 } 1196 1197 static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1198 int attr_mask) 1199 { 1200 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 1201 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 1202 int p; 1203 1204 if ((attr_mask & IB_QP_PORT) && 1205 (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { 1206 ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n", 1207 attr->port_num); 1208 return -EINVAL; 1209 } 1210 1211 if (attr_mask & IB_QP_PKEY_INDEX) { 1212 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; 1213 if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { 1214 ibdev_err(&hr_dev->ib_dev, 1215 "invalid attr, pkey_index = %u.\n", 1216 attr->pkey_index); 1217 return -EINVAL; 1218 } 1219 } 1220 1221 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 1222 attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { 1223 ibdev_err(&hr_dev->ib_dev, 1224 "invalid attr, max_rd_atomic = %u.\n", 1225 attr->max_rd_atomic); 1226 return -EINVAL; 1227 } 1228 1229 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 1230 attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { 1231 ibdev_err(&hr_dev->ib_dev, 1232 "invalid attr, max_dest_rd_atomic = %u.\n", 1233 attr->max_dest_rd_atomic); 1234 return -EINVAL; 1235 } 1236 1237 if (attr_mask & IB_QP_PATH_MTU) 1238 return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); 1239 1240 return 0; 1241 } 1242 1243 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1244 int attr_mask, struct ib_udata *udata) 1245 { 1246 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); 1247 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 1248 enum ib_qp_state cur_state, new_state; 1249 int ret = -EINVAL; 1250 1251 mutex_lock(&hr_qp->mutex); 1252 1253 if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state) 1254 goto out; 1255 1256 cur_state = hr_qp->state; 1257 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 1258 1259 if (ibqp->uobject && 1260 (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { 1261 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) { 1262 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); 1263 1264 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) 1265 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); 1266 } else { 1267 ibdev_warn(&hr_dev->ib_dev, 1268 "flush cqe is not supported in userspace!\n"); 1269 goto out; 1270 } 1271 } 1272 1273 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, 1274 attr_mask)) { 1275 ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); 1276 goto out; 1277 } 1278 1279 ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask); 1280 if (ret) 1281 goto out; 1282 1283 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 1284 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { 1285 ret = -EPERM; 1286 ibdev_err(&hr_dev->ib_dev, 1287 "RST2RST state is not supported\n"); 1288 } else { 1289 ret = 0; 1290 } 1291 1292 goto out; 1293 } 1294 1295 ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, 1296 new_state); 1297 1298 out: 1299 mutex_unlock(&hr_qp->mutex); 1300 1301 return ret; 1302 } 1303 1304 void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) 1305 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 1306 { 1307 if (unlikely(send_cq == NULL && recv_cq == NULL)) { 1308 __acquire(&send_cq->lock); 1309 __acquire(&recv_cq->lock); 1310 } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { 1311 spin_lock_irq(&send_cq->lock); 1312 __acquire(&recv_cq->lock); 1313 } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { 1314 spin_lock_irq(&recv_cq->lock); 1315 __acquire(&send_cq->lock); 1316 } else if (send_cq == recv_cq) { 1317 spin_lock_irq(&send_cq->lock); 1318 __acquire(&recv_cq->lock); 1319 } else if (send_cq->cqn < recv_cq->cqn) { 1320 spin_lock_irq(&send_cq->lock); 1321 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 1322 } else { 1323 spin_lock_irq(&recv_cq->lock); 1324 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); 1325 } 1326 } 1327 1328 void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, 1329 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock) 1330 __releases(&recv_cq->lock) 1331 { 1332 if (unlikely(send_cq == NULL && recv_cq == NULL)) { 1333 __release(&recv_cq->lock); 1334 __release(&send_cq->lock); 1335 } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { 1336 __release(&recv_cq->lock); 1337 spin_unlock(&send_cq->lock); 1338 } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { 1339 __release(&send_cq->lock); 1340 spin_unlock(&recv_cq->lock); 1341 } else if (send_cq == recv_cq) { 1342 __release(&recv_cq->lock); 1343 spin_unlock_irq(&send_cq->lock); 1344 } else if (send_cq->cqn < recv_cq->cqn) { 1345 spin_unlock(&recv_cq->lock); 1346 spin_unlock_irq(&send_cq->lock); 1347 } else { 1348 spin_unlock(&send_cq->lock); 1349 spin_unlock_irq(&recv_cq->lock); 1350 } 1351 } 1352 1353 static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset) 1354 { 1355 return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); 1356 } 1357 1358 void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n) 1359 { 1360 return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); 1361 } 1362 1363 void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n) 1364 { 1365 return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); 1366 } 1367 1368 void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n) 1369 { 1370 return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); 1371 } 1372 1373 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, 1374 struct ib_cq *ib_cq) 1375 { 1376 struct hns_roce_cq *hr_cq; 1377 u32 cur; 1378 1379 cur = hr_wq->head - hr_wq->tail; 1380 if (likely(cur + nreq < hr_wq->wqe_cnt)) 1381 return false; 1382 1383 hr_cq = to_hr_cq(ib_cq); 1384 spin_lock(&hr_cq->lock); 1385 cur = hr_wq->head - hr_wq->tail; 1386 spin_unlock(&hr_cq->lock); 1387 1388 return cur + nreq >= hr_wq->wqe_cnt; 1389 } 1390 1391 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) 1392 { 1393 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; 1394 unsigned int reserved_from_bot; 1395 unsigned int i; 1396 1397 mutex_init(&qp_table->scc_mutex); 1398 mutex_init(&qp_table->bank_mutex); 1399 xa_init(&hr_dev->qp_table_xa); 1400 1401 reserved_from_bot = hr_dev->caps.reserved_qps; 1402 1403 for (i = 0; i < reserved_from_bot; i++) { 1404 hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++; 1405 hr_dev->qp_table.bank[get_qp_bankid(i)].min++; 1406 } 1407 1408 for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) { 1409 ida_init(&hr_dev->qp_table.bank[i].ida); 1410 hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps / 1411 HNS_ROCE_QP_BANK_NUM - 1; 1412 hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min; 1413 } 1414 1415 return 0; 1416 } 1417 1418 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev) 1419 { 1420 int i; 1421 1422 for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) 1423 ida_destroy(&hr_dev->qp_table.bank[i].ida); 1424 } 1425