/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * make sure we signal the QP destroy leg that the flush has
	 * completed, so that it can safely proceed and destroy the QP
	 */
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}

void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	refcount_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}

void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in SQ/RQ when the QP
	 * enters the error state, so as a workaround for this hardware
	 * limitation the driver assists in flushing. The flush operation
	 * uses a mailbox to convey the QP state to the hardware, which
	 * can sleep due to the mutex protection around the mailbox
	 * calls, so the flush is deferred to a workqueue.
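	 *
	 * The work item takes its own reference on the QP in
	 * init_flush_work() and drops it in flush_work_handle(), so the
	 * QP cannot be destroyed while a flush is still queued.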
	 */
	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
		init_flush_work(dev, qp);
}

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		refcount_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
		return;
	}

	if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
	    event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
		qp->state = IB_QPS_ERR;

		flush_cqe(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
				 unsigned long *qpn)
{
	int id;

	id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
	if (id < 0) {
		id = ida_alloc_range(&bank->ida, bank->min, bank->max,
				     GFP_KERNEL);
		if (id < 0)
			return id;
	}

	/* the QPN should keep increasing until the max value is reached. */
	bank->next = (id + 1) > bank->max ? bank->min : id + 1;

	/* the lower 3 bits are the bank id */
	*qpn = (id << 3) | bankid;

	return 0;
}

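/*
 * QPNs are spread across HNS_ROCE_QP_BANK_NUM banks: the per-bank IDA id
 * is shifted left by 3 and the bank id is stored in the low 3 bits, so
 * e.g. id 5 allocated from bank 3 becomes QPN (5 << 3) | 3 = 43.
 */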
static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long num = 0;
	u8 bankid;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		num = 1;
	} else {
		mutex_lock(&qp_table->bank_mutex);
		bankid = get_least_load_bankid_for_qp(qp_table->bank);

		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
					    &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to alloc QPN, ret = %d\n", ret);
			mutex_unlock(&qp_table->bank_mutex);
			return ret;
		}

		qp_table->bank[bankid].inuse++;
		mutex_unlock(&qp_table->bank_mutex);
	}

	hr_qp->qpn = num;

	return 0;
}

static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "failed to xa store for QPC\n");
	else
		/* add QP to the device's QP list for software completion handling */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
{
	rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->sq_node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
	    hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn);
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static inline u8 get_qp_bankid(unsigned long qpn)
{
	/* The lower 3 bits of QPN are used to hash to different banks */
	return (u8)(qpn & GENMASK(2, 0));
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	u8 bankid;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	bankid = get_qp_bankid(hr_qp->qpn);

	ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);

	mutex_lock(&hr_dev->qp_table.bank_mutex);
	hr_dev->qp_table.bank[bankid].inuse--;
	mutex_unlock(&hr_dev->qp_table.bank_mutex);
}

static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
		       bool user)
{
	u32 max_sge = dev->caps.max_rq_sg;

	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return max_sge;

	/* Reserve SGEs only for HIP08 in kernel; the userspace driver will
	 * calculate the number of max_sge with reserved SGEs when allocating
	 * the WQE buffer, so there is no need to do this again in the kernel.
	 * But the number may exceed the capacity of SGEs recorded in the
	 * firmware, so the kernel driver should just adapt the value
	 * accordingly.
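	 *
	 * For example, with max_rq_sg = 16 a userspace QP is allowed
	 * roundup_pow_of_two(16 + 1) = 32 SGEs here (userspace accounts
	 * for its reserved SGE itself), while a kernel QP keeps 16 and
	 * reserves one SGE internally via rq.rsv_sge.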
	 */
	if (user)
		max_sge = roundup_pow_of_two(max_sge + 1);
	else
		hr_qp->rq.rsv_sge = 1;

	return max_sge;
}

static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq, bool user)
{
	u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
	u32 cnt;

	/* If an SRQ is used, set the RQ-related attributes to zero */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		hr_qp->rq_inl_buf.wqe_cnt = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of QP support capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > max_sge) {
		ibdev_err(&hr_dev->ib_dev,
			  "RQ config error, depth = %u, sge = %u\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
					      hr_qp->rq.rsv_sge);

	hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
				    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
	    hr_qp->ibqp.qp_type != IB_QPT_UD &&
	    hr_qp->ibqp.qp_type != IB_QPT_GSI)
		hr_qp->rq_inl_buf.wqe_cnt = cnt;
	else
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;

	return 0;
}

static u32 get_max_inline_data(struct hns_roce_dev *hr_dev,
			       struct ib_qp_cap *cap)
{
	if (cap->max_inline_data) {
		cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data);
		return min(cap->max_inline_data,
			   hr_dev->caps.max_sq_inline);
	}

	return 0;
}

static void update_inline_data(struct hns_roce_qp *hr_qp,
			       struct ib_qp_cap *cap)
{
	u32 sge_num = hr_qp->sq.ext_sge_cnt;

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI ||
		      hr_qp->ibqp.qp_type == IB_QPT_UD))
			sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num);

		cap->max_inline_data = max(cap->max_inline_data,
					   sge_num * HNS_ROCE_SGE_SIZE);
	}

	hr_qp->max_inline_data = cap->max_inline_data;
}

static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi,
					 u32 max_send_sge)
{
	unsigned int std_sge_num;
	unsigned int min_sge;

	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	min_sge = is_ud_or_gsi ? 1 : 0;
	return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) :
				min_sge;
}

static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
						  u32 max_inline_data)
{
	unsigned int inline_sge;

	inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;

	/*
	 * If max_inline_data is less than
	 * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE and this is not a UD/GSI
	 * QP, the inline data fits into the standard WQE and there is no
	 * need to extend the SGE space.
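	 *
	 * For example, assuming a 16-byte SGE and HNS_ROCE_SGE_IN_WQE == 2:
	 * an RC QP with max_inline_data = 32 needs 32 / 16 = 2 inline SGEs,
	 * which already fit in the standard WQE, so 0 is returned; with
	 * max_inline_data = 128 it needs 8 inline SGEs and extended SGE
	 * space must be reserved.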
	 */
	if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
		inline_sge = 0;

	return inline_sge;
}

static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
			      struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
	bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
			     hr_qp->ibqp.qp_type == IB_QPT_UD);
	unsigned int std_sge_num;
	u32 inline_ext_sge = 0;
	u32 ext_wqe_sge_cnt;
	u32 total_sge_cnt;

	cap->max_inline_data = get_max_inline_data(hr_dev, cap);

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
							cap->max_send_sge);

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		inline_ext_sge = max(ext_wqe_sge_cnt,
				     get_sge_num_from_max_inl_data(is_ud_or_gsi,
							cap->max_inline_data));
		hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
					roundup_pow_of_two(inline_ext_sge) : 0;

		hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);

		ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
	} else {
		hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
		hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
	}

	/* If the number of extended SGEs is not zero, they MUST occupy at
	 * least HNS_HW_PAGE_SIZE of space.
	 */
	if (ext_wqe_sge_cnt) {
		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
		hr_qp->sge.sge_cnt = max(total_sge_cnt,
					 (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
	}

	update_inline_data(hr_qp, cap);
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

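/*
 * The WQE buffer of a QP is one multi-region MTR laid out as SQ WQEs,
 * then the extended SGE area, then RQ WQEs; a part is simply skipped
 * when its size is zero:
 *
 *   +-----------+--------------------+-----------+
 *   |  SQ WQEs  |   extended SGEs    |  RQ WQEs  |
 *   +-----------+--------------------+-----------+
 *   ^ sq.offset ^ sge.offset         ^ rq.offset
 */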
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
			       struct ib_qp_init_attr *init_attr)
{
	u32 max_recv_sge = init_attr->cap.max_recv_sge;
	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
	struct hns_roce_rinl_wqe *wqe_list;
	int i;

	/* allocate recv inline buf */
	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
			   GFP_KERNEL);
	if (!wqe_list)
		goto err;

	/* Allocate one contiguous buffer for all the inline SGEs we need */
	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
					sizeof(struct hns_roce_rinl_sge)),
				      GFP_KERNEL);
	if (!wqe_list[0].sg_list)
		goto err_wqe_list;

	/* Assign a slice of sg_list to each inline wqe */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	hr_qp->rq_inl_buf.wqe_list = wqe_list;

	return 0;

err_wqe_list:
	kfree(wqe_list);

err:
	return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
	if (hr_qp->rq_inl_buf.wqe_list)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
	kfree(hr_qp->rq_inl_buf.wqe_list);
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc inline buf, ret = %d.\n",
				  ret);
			return ret;
		}
	} else {
		hr_qp->rq_inl_buf.wqe_list = NULL;
	}

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;

	return 0;

err_inline:
	free_rq_inline_buf(hr_qp);

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
	free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}

static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
			 struct hns_roce_dev *hr_dev,
			 struct ib_udata *udata,
			 struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx =
		rdma_udata_to_drv_context(udata,
			struct hns_roce_ucontext, ibucontext);
	struct rdma_user_mmap_entry *rdma_entry;
	u64 address;

	address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;

	hr_qp->dwqe_mmap_entry =
		hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
						HNS_ROCE_DWQE_SIZE,
						HNS_ROCE_MMAP_TYPE_DWQE);

	if (!hr_qp->dwqe_mmap_entry) {
		ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
		return -ENOMEM;
	}

	rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
	resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);

	return 0;
}

static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata,
			    struct hns_roce_ib_create_qp *ucmd,
			    struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
		struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user SQ doorbell, ret = %d.\n",
				  ret);
			goto err_out;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
	}

	if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user RQ doorbell, ret = %d.\n",
				  ret);
			goto err_sdb;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;

err_sdb:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_init_attr *init_attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		hr_qp->sq.db_reg = hr_dev->mem_base +
				   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
	else
		hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
				   DB_REG_OFFSET * hr_dev->priv_uar.index;

	hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
			   DB_REG_OFFSET * hr_dev->priv_uar.index;

	if (kernel_qp_has_rdb(hr_dev, init_attr)) {
		ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc kernel RQ doorbell, ret = %d.\n",
				  ret);
			return ret;
		}
		*hr_qp->rdb.db_record = 0;
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;
}

static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
			ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
			if (ret)
				return ret;
		}

		ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
				       resp);
		if (ret)
			goto err_remove_qp;
	} else {
		ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
		if (ret)
			return ret;
	}

	return 0;

err_remove_qp:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
		qp_user_mmap_entry_remove(hr_qp);

	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
			qp_user_mmap_entry_remove(hr_qp);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

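/*
 * The wrid arrays remember the caller's wr_id for every posted SQ/RQ WQE
 * so that it can be returned in the matching work completion; they are
 * only needed for kernel QPs, where the driver itself builds the WQEs.
 */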
static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;

	return 0;

err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ucontext *uctx;
	int ret;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr), !!udata);
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
						 ibucontext);
		hr_qp->config = uctx->config;
		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);
	} else {
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);
	}

	return ret;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd;
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		return ret;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
		goto err_buf;
	}

	ret = alloc_qpn(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
		goto err_qpn;
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_db;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
			  ret);
		goto err_qpc;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
		goto err_store;
	}

	if (udata) {
		resp.cap_flags = hr_qp->en_flags;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_store;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_flow_ctrl;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	refcount_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_flow_ctrl:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_store:
	free_qpc(hr_dev, hr_qp);
err_qpc:
	free_qp_db(hr_dev, hr_qp, udata);
err_db:
	free_qpn(hr_dev, hr_qp);
err_qpn:
	free_qp_buf(hr_dev, hr_qp);
err_buf:
	free_kernel_wrid(hr_qp);
	return ret;
}

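/*
 * Drop the creation reference and wait until every other user of the QP
 * (asynchronous event handling, the deferred flush work) has released
 * its reference before the QP resources are freed.
 */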
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);
}

static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
			 bool is_user)
{
	switch (type) {
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
			goto out;
		break;
	case IB_QPT_UD:
		if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
		    is_user)
			goto out;
		break;
	case IB_QPT_RC:
	case IB_QPT_GSI:
		break;
	default:
		goto out;
	}

	return 0;

out:
	ibdev_err(&hr_dev->ib_dev, "QP type %d is not supported\n", type);

	return -EOPNOTSUPP;
}

int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct ib_device *ibdev = qp->device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	struct hns_roce_qp *hr_qp = to_hr_qp(qp);
	struct ib_pd *pd = qp->pd;
	int ret;

	ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
	if (ret)
		return ret;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;

	if (init_attr->qp_type == IB_QPT_GSI) {
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
	if (ret)
		ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
			  init_attr->qp_type, ret);

	return ret;
}

int to_hr_qp_type(int qp_type)
{
	switch (qp_type) {
	case IB_QPT_RC:
		return SERV_TYPE_RC;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return SERV_TYPE_UD;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		return SERV_TYPE_XRC;
	default:
		return -1;
	}
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	     attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid path_mtu (%d) while modifying QP\n",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
			  attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid attr, pkey_index = %u.\n",
				  attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_rd_atomic = %u.\n",
			  attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_dest_rd_atomic = %u.\n",
			  attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET)
		goto out;

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

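/*
 * head and tail are free-running counters, so head - tail gives the
 * number of outstanding WQEs even after the counters wrap around. If
 * the lockless check suggests the queue is full, it is repeated under
 * the CQ lock to get a consistent view before reporting overflow.
 */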
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
						sizeof(u32), GFP_KERNEL);
	if (!qp_table->idx_table.spare_idx)
		return -ENOMEM;

	mutex_init(&qp_table->scc_mutex);
	mutex_init(&qp_table->bank_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	for (i = 0; i < reserved_from_bot; i++) {
		hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
		hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		ida_init(&hr_dev->qp_table.bank[i].ida);
		hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
					       HNS_ROCE_QP_BANK_NUM - 1;
		hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
		ida_destroy(&hr_dev->qp_table.bank[i].ida);
	kfree(hr_dev->qp_table.idx_table.spare_idx);
}