/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM		(2 * HNS_ROCE_MAX_PORTS)

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_qp *qp;

        spin_lock(&qp_table->lock);

        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);

        spin_unlock(&qp_table->lock);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
                                     int align, unsigned long *base)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return HNS_ROCE_QP_STATE_RST;
        case IB_QPS_INIT:
                return HNS_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return HNS_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return HNS_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return HNS_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return HNS_ROCE_QP_STATE_ERR;
        default:
                return HNS_ROCE_QP_NUM_STATE;
        }
}

static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                                 struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
                goto err_put_irrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_irrl:

        return ret;
}

static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                             struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = &hr_dev->pdev->dev;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "QPC table get failed\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "IRRL table get failed\n");
                goto err_put_qp;
        }

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(dev, "QPC radix_tree_insert failed\n");
                goto err_put_irrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned long flags;

        spin_lock_irqsave(&qp_table->lock, flags);
        radix_tree_delete(&hr_dev->qp_table_tree,
                          hr_qp->qpn & (hr_dev->caps.num_qps - 1));
        spin_unlock_irqrestore(&qp_table->lock, flags);
}

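/*
 * Wait for a QP to become unused before releasing its resources: drop the
 * initial reference taken at allocation time, wait until any concurrent
 * users (such as the async event handler) have dropped theirs, and then
 * return the QPC and IRRL table entries for non-GSI QPs.
 */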
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
                hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
                hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        }
}

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (base_qpn < SQP_NUM)
                return;

        hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}

static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
                                struct ib_qp_cap *cap, int is_user, int has_srq,
                                struct hns_roce_qp *hr_qp)
{
        u32 max_cnt;
        struct device *dev = &hr_dev->pdev->dev;

        /* Check the validity of the requested RQ capacity */
        if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
                dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
                        cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        /* If an SRQ is attached, the RQ-related values must be zero */
        if (has_srq) {
                if (cap->max_recv_wr) {
                        dev_dbg(dev, "srq no need config max_recv_wr\n");
                        return -EINVAL;
                }

                hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
        } else {
                if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
                        dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
                        return -EINVAL;
                }

                /* In the v1 engine, parameter verification is done here */
                max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
                          cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
                hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

                if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
                        dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
                        return -EINVAL;
                }

                max_cnt = max(1U, cap->max_recv_sge);
                hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
                /* WQE is fixed for 64B */
                hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
        }

        cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs;

        return 0;
}

static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
                                     struct hns_roce_qp *hr_qp,
                                     struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);

        /* Sanity check SQ size before proceeding */
        if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
            ucmd->log_sq_stride > max_sq_stride ||
            ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

        /* Get buf size; the SQ and RQ are each aligned to the page size */
        hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                              hr_qp->rq.wqe_shift), PAGE_SIZE) +
                           HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);

        hr_qp->sq.offset = 0;
        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);

        return 0;
}

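/*
 * Size the send queue for a kernel QP: the WQE count is rounded up to a
 * power of two with a floor of HNS_ROCE_MIN_WQE_NUM, the WQE stride is the
 * maximum SQ descriptor size, and the buffer is laid out with the SQ first,
 * followed by the RQ, each aligned to the page size.
 */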
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                                       struct ib_qp_cap *cap,
                                       struct hns_roce_qp *hr_qp)
{
        struct device *dev = &hr_dev->pdev->dev;
        u32 max_cnt;

        if (cap->max_send_wr > hr_dev->caps.max_wqes ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
            cap->max_inline_data > hr_dev->caps.max_sq_inline) {
                dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq_max_wqes_per_wr = 1;
        hr_qp->sq_spare_wqes = 0;

        /* In the v1 engine, parameter verification is done here */
        max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
                  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
        hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
        if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
                dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
                return -EINVAL;
        }

        /* Get data_seg numbers */
        max_cnt = max(1U, cap->max_send_sge);
        hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);

        /* Get buf size; the SQ and RQ are each aligned to the page size */
        hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                              hr_qp->rq.wqe_shift), PAGE_SIZE) +
                           HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);
        hr_qp->sq.offset = 0;
        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);

        /* Report the resulting send WR and SGE limits back to the caller */
        cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}

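/*
 * Common QP creation path shared by RC and GSI QPs: size and validate the
 * work queues, set up the WQE buffer (user memory mapped through the MTT
 * for userspace QPs, a kernel buffer otherwise), reserve a QPN unless a
 * special QPN was passed in via sqpn, and install the QP in the radix tree
 * used by the async event handler.
 */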
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata, unsigned long sqpn,
                                     struct hns_roce_qp *hr_qp)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_ib_create_qp ucmd;
        unsigned long qpn = 0;
        int ret = 0;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
        else
                hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

        ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
                                   !!init_attr->srq, hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_set_rq_size failed\n");
                goto err_out;
        }

        if (ib_pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "ib_copy_from_udata error for create qp\n");
                        ret = -EFAULT;
                        goto err_out;
                }

                ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
                if (ret) {
                        dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
                        goto err_out;
                }

                hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
                                          ucmd.buf_addr, hr_qp->buff_size, 0,
                                          0);
                if (IS_ERR(hr_qp->umem)) {
                        dev_err(dev, "ib_umem_get error for create qp\n");
                        ret = PTR_ERR(hr_qp->umem);
                        goto err_out;
                }

                ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
                                        hr_qp->umem->page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
                                                 hr_qp->umem);
                if (ret) {
                        dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
                        goto err_mtt;
                }
        } else {
                if (init_attr->create_flags &
                    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_out;
                }

                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_out;
                }

                /* Set SQ size */
                ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
                                                  hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
                        goto err_out;
                }

                /* QP doorbell register address */
                hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
                hr_qp->rq.db_reg_l = hr_dev->reg_base +
                                     ROCEE_DB_OTHERS_L_0_REG +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;

                /* Allocate QP buf */
                if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
                                       &hr_qp->hr_buf)) {
                        dev_err(dev, "hns_roce_buf_alloc error!\n");
                        ret = -ENOMEM;
                        goto err_out;
                }

                /* Write MTT */
                ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
                                        hr_qp->hr_buf.page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
                                             &hr_qp->hr_buf);
                if (ret) {
                        dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
                        goto err_mtt;
                }

                hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
                        ret = -ENOMEM;
                        goto err_wrid;
                }
        }

        if (sqpn) {
                qpn = sqpn;
        } else {
                /* Get QPN */
                ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
                if (ret) {
                        dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
                        goto err_wrid;
                }
        }

        if ((init_attr->qp_type) == IB_QPT_GSI) {
                ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        } else {
                ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        }

        if (sqpn)
                hr_qp->doorbell_qpn = 1;
        else
                hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

        hr_qp->event = hns_roce_ib_qp_event;

        return 0;

err_qpn:
        if (!sqpn)
                hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
        kfree(hr_qp->sq.wrid);
        kfree(hr_qp->rq.wrid);

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
        if (ib_pd->uobject)
                ib_umem_release(hr_qp->umem);
        else
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
        return ret;
}

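/* Verbs entry point for QP creation; only RC and GSI QPs are supported. */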
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_sqp *hr_sqp;
        struct hns_roce_qp *hr_qp;
        int ret;

        switch (init_attr->qp_type) {
        case IB_QPT_RC: {
                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                                                hr_qp);
                if (ret) {
                        dev_err(dev, "Create RC QP failed\n");
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                hr_qp->ibqp.qp_num = hr_qp->qpn;

                break;
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (pd->uobject) {
                        dev_err(dev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }

                hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
                if (!hr_sqp)
                        return ERR_PTR(-ENOMEM);

                hr_qp = &hr_sqp->hr_qp;
                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
                hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
                                     hr_dev->iboe.phy_port[hr_qp->port];

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp->ibqp.qp_num, hr_qp);
                if (ret) {
                        dev_err(dev, "Create GSI QP failed!\n");
                        kfree(hr_sqp);
                        return ERR_PTR(ret);
                }

                break;
        }
        default: {
                dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }
        }

        return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
        int transport_type;

        if (qp_type == IB_QPT_RC)
                transport_type = SERV_TYPE_RC;
        else if (qp_type == IB_QPT_UC)
                transport_type = SERV_TYPE_UC;
        else if (qp_type == IB_QPT_UD)
                transport_type = SERV_TYPE_UD;
        else if (qp_type == IB_QPT_GSI)
                transport_type = SERV_TYPE_UD;
        else
                transport_type = -1;

        return transport_type;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct device *dev = &hr_dev->pdev->dev;
        int ret = -EINVAL;
        int p;
        enum ib_mtu active_mtu;

        mutex_lock(&hr_qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ?
                    attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
                                IB_LINK_LAYER_ETHERNET)) {
                dev_err(dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
                        attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
                                attr->pkey_index);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

                if (attr->path_mtu > IB_MTU_2048 ||
                    attr->path_mtu < IB_MTU_256 ||
                    attr->path_mtu > active_mtu) {
                        dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
                                attr->path_mtu);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
                        attr->max_rd_atomic);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
                        attr->max_dest_rd_atomic);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                ret = -EPERM;
                dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
                        new_state);
                goto out;
        }

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

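/*
 * Lock the send and receive CQs in a consistent order (by ascending CQN)
 * so that concurrent callers cannot deadlock; a CQ shared by both queues
 * is only locked once.
 */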
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

__be32 send_ieth(struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return cpu_to_le32(wr->ex.imm_data);
        case IB_WR_SEND_WITH_INV:
                return cpu_to_le32(wr->ex.invalidate_rkey);
        default:
                return 0;
        }
}

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->max_post))
                return false;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->max_post;
}

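/*
 * Set up the QPN bitmap and the radix tree used to look up QPs for async
 * events; the lowest SQP_NUM QPNs are reserved for the special (GSI) QPs,
 * two per port.
 */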
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int reserved_from_top = 0;
        int ret;

        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

        /* Each port includes two SQPs; six ports give 12 in total */
        ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
                                   hr_dev->caps.num_qps - 1, SQP_NUM,
                                   reserved_from_top);
        if (ret) {
                dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
                        ret);
                return ret;
        }

        return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}