/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"

#define SQP_NUM	(2 * HNS_ROCE_MAX_PORTS)

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}

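/*
 * GSI QPs use a fixed, caller-supplied QPN and do not take QPC/IRRL HEM
 * table entries; only the radix tree mapping and the refcount/completion
 * used for teardown are set up here.
 */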
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}

static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}

void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}

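/*
 * Return a range of QPNs to the allocator. The first SQP_NUM QPNs are
 * permanently reserved for the special (GSI) QPs and are never handed
 * back to the bitmap.
 */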
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}

static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	u32 max_cnt;
	struct device *dev = &hr_dev->pdev->dev;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or SGE error! max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is used, the RQ related values must be zero */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "max_recv_wr must be 0 when an SRQ is used\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space must set max_recv_wr and max_recv_sge\n");
			return -EINVAL;
		}

		/* In v1 engine, enforce the minimum RQ WQE number */
		max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
			  cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		/* WQE size is fixed at 64 bytes */
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(&hr_dev->pdev->dev, "SQ size check failed\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	/* Get buf size, SQ and RQ are aligned to page_size */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	return 0;
}

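/*
 * Size the SQ for a kernel QP and lay out the WQE buffer: the SQ is
 * placed at offset 0 and the RQ follows it, with each queue rounded up
 * to a page boundary.
 */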
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 max_cnt;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	/* In v1 engine, enforce the minimum SQ WQE number */
	max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
		  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);

	/* Get buf size, SQ and RQ are aligned to page_size */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);
	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	/* Report back the send WR and SGE numbers actually provided */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_out;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_out;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_out;
		}

		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
					ilog2((unsigned int)hr_qp->umem->page_size),
					&hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
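		/*
		 * Kernel QP: validate create_flags, size the SQ from the
		 * requested capabilities and allocate the WQE buffer and
		 * wrid arrays in kernel memory.
		 */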
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_out;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base +
				     ROCEE_DB_OTHERS_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
				       &hr_qp->hr_buf)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_out;
		}

		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if ((init_attr->qp_type) == IB_QPT_GSI) {
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
	return ret;
}

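/*
 * Create an RC or GSI QP. RC QPs get a QPN from the bitmap allocator,
 * while the (kernel-only) GSI QP uses a fixed QPN derived from the
 * physical port it is bound to.
 */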
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "userspace GSI QP is not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
		hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
				     hr_dev->iboe.phy_port[hr_qp->port];

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "unsupported QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}

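/*
 * Validate the requested QP state transition and attributes (port, pkey
 * index, path MTU, RDMA read depths), then hand the actual QPC update
 * over to the hardware-specific modify_qp hook.
 */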
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = &hr_dev->pdev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid. attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid. attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if (attr->path_mtu > IB_MTU_2048 ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu (%d) invalid while modifying qp\n",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid. attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid. attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = -EPERM;
		dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
			new_state);
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

__be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(wr->ex.imm_data);
	case IB_WR_SEND_WITH_INV:
		return cpu_to_le32(wr->ex.invalidate_rkey);
	default:
		return 0;
	}
}

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return 0;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}

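/*
 * Initialise the QP table: the radix tree used for QPN lookup and the
 * QPN bitmap, with the low SQP_NUM QPNs reserved for the special QPs.
 */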
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port reserves two SQPs; with six ports that is 12 in total */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed! error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}