/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);
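
/*
 * Translate a hardware asynchronous event on a QP into the corresponding
 * IB event and deliver it to the consumer's event handler, if one is
 * registered.  Event types with no IB equivalent are logged and dropped.
 */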
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
					   base) ?
		       -ENOMEM :
		       0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}

static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}
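
/*
 * Tear-down mirrors hns_roce_qp_alloc(): hns_roce_qp_remove() unhooks the
 * QP from the radix tree so no new lookups can take a reference, and
 * hns_roce_qp_free() drops the initial reference, waits for outstanding
 * event handlers to finish and only then releases the QPC/IRRL/TRRL table
 * entries.
 */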
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the requested RQ size against the device capabilities */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is attached, the RQ is not used; size it to zero */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "srq no need config max_recv_wr\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}
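
/*
 * A QP's work queue buffer is a single allocation laid out as consecutive,
 * aligned regions.  On hardware limited to two SQ SGEs the layout is
 * [SQ][RQ] with each region aligned to PAGE_SIZE; otherwise it is
 * [SQ][extended SGE][RQ] aligned to the MTT buffer page size, with the
 * extended SGE region present only when a WQE may carry more than two
 * data segments.  The offsets recorded in hr_qp->sq, hr_qp->sge and
 * hr_qp->rq here are what the WQE accessors at the bottom of this file
 * use to locate individual entries.
 */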
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 ex_sge_num;
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));

	if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(hr_dev->dev,
				"The extended sge cnt error! sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	hr_qp->sge.sge_shift = 4;
	ex_sge_num = hr_qp->sge.sge_cnt;

	/* Get buf size; SQ and RQ are aligned to page_size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->sge.sge_cnt =
		       max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (ex_sge_num) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					   HNS_ROCE_ALOGN_UP(
							(hr_qp->sge.sge_cnt <<
							hr_qp->sge.sge_shift),
							page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
		}
	}

	return 0;
}
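
/*
 * Extended SGE entries are 16 bytes each (1 << sge.sge_shift).  The sizing
 * above and below reserves room for (max_gs - 2) entries per WQE, i.e. for
 * the data segments that do not fit in the WQE descriptor itself, rounded
 * up to a power of two.
 */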
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR or sge or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     (hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* A UD SQ WQE carries all of its SGEs in the extended SGE area */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
				     hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
				hr_qp->sge.sge_cnt);
			return -EINVAL;
		}
	}

	/* Get buf size; SQ and RQ are aligned to page_size */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.sge_cnt = max(page_size / (1 << hr_qp->sge.sge_shift),
					 (u32)hr_qp->sge.sge_cnt);
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Report the WR and SGE counts actually provided for the SQ */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
		return 0;

	return 1;
}
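
/*
 * Common QP creation path.  For a user-space QP the SQ geometry comes from
 * the create command, the WQE buffer is pinned with ib_umem_get() and its
 * pages written to the MTT, and the SQ/RQ record doorbells are mapped when
 * both sides support them.  For a kernel QP the driver sizes the SQ itself,
 * allocates the buffer and RQ record doorbell, writes the MTT and allocates
 * the wrid arrays.  Both paths then reserve a QPN (unless a fixed sqpn was
 * passed in for the GSI QP), set up the QP context resources and install
 * the QP in the radix tree so asynchronous events can find it.
 */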
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	struct hns_roce_ib_create_qp_resp resp = {};
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* Allocate one sg_list array shared by all RQ WQEs */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		/* Point each WQE's sg_list into the shared array */
		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(hr_qp->umem),
						hr_qp->umem->page_shift,
						&hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.sdb_addr, &hr_qp->sdb);
			if (ret) {
				dev_err(dev, "sq record doorbell map failed!\n");
				goto err_mtt;
			}

			/* indicate kernel supports sq record db */
			resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
			hr_qp->sdb_en = 1;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.db_addr, &hr_qp->rdb);
			if (ret) {
				dev_err(dev, "rq record doorbell map failed!\n");
				goto err_sq_dbmap;
			}
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				dev_err(dev, "rq record doorbell alloc failed!\n");
				goto err_rq_sge_list;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->rdb_en = 1;
		}

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_db;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* In hw v1, the GSI QP context lives in the RoCE engine's registers */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {

		/* indicate kernel supports rq record db */
		resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_qp;

		hr_qp->rdb_en = 1;
	}
	hr_qp->event = hns_roce_ib_qp_event;

	return 0;
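
	/*
	 * Error unwind: release everything in the reverse order it was set
	 * up.  The user-space and kernel paths share these labels but free
	 * different resources, hence the ib_pd->uobject checks below.
	 */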
err_qp:
	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		hns_roce_qp_remove(hr_dev, hr_qp);
	else
		hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	if (ib_pd->uobject) {
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
	}

err_sq_dbmap:
	if (ib_pd->uobject)
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->sdb);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
	if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* In hw v1, the GSI QP number is fixed per port */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default:{
		dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);
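
/*
 * Validate a modify-QP request against the device capabilities and the IB
 * state machine before handing it to the hardware-specific hw->modify_qp().
 * When user space moves a QP to the error state, the current SQ/RQ head
 * pointers are read back from the record doorbells so that outstanding
 * work requests can be flushed.
 */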
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->sdb_en == 1) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
			hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			dev_warn(dev, "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->caps.min_wqes) {
			ret = -EPERM;
			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
				new_state);
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);
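
/*
 * WQE accessors: each helper returns a pointer into the QP buffer using the
 * region offsets and entry shifts fixed at creation time; @n is the index
 * of the WQE (or extended SGE entry) within its region.
 */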
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* In hw v1, each port has two SQPs; with six ports that is 12 reserved QPs */
	if (hr_dev->caps.max_sq_sg <= 2)
		reserved_from_bot = SQP_NUM;
	else
		reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}