/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;
	u8 bankid;
	int id;

	mutex_lock(&cq_table->bank_mutex);
	bankid = get_least_load_bankid_for_cq(cq_table->bank);
	bank = &cq_table->bank[bankid];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
	if (id < 0) {
		mutex_unlock(&cq_table->bank_mutex);
		return id;
	}

	/* the lower 2 bits of the CQN hold the bankid */
	hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
	bank->inuse++;
	mutex_unlock(&cq_table->bank_mutex);

	return 0;
}

static inline u8 get_cq_bankid(unsigned long cqn)
{
	/* The lower 2 bits of the CQN are used to hash to different banks */
	return (u8)(cqn & GENMASK(1, 0));
}

static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;

	bank = &cq_table->bank[get_cq_bankid(cqn)];

	ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);

	mutex_lock(&cq_table->bank_mutex);
	bank->inuse--;
	mutex_unlock(&cq_table->bank_mutex);
}
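
/*
 * Illustration of the CQN encoding above (the concrete values here are
 * only an example; CQ_BANKID_SHIFT is assumed to be 2, which matches the
 * 2-bit bank mask in get_cq_bankid()): an IDA id of 5 allocated from
 * bank 2 yields cqn = (5 << 2) | 2 = 0x16; get_cq_bankid(0x16) then
 * recovers bank 2, and ida_free() gets back id 0x16 >> 2 = 5.
 */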

static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 mtts[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle;
	int ret;

	/* hns_roce_mtr_find() returns the number of entries found, so a
	 * return of zero indicates failure here.
	 */
	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
				&dma_handle);
	if (!ret) {
		ibdev_err(ibdev, "failed to find CQ mtr.\n");
		return -EINVAL;
	}

	/* Get the CQ context from the HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
			  hr_cq->cqn, ret);
		goto err_out;
	}

	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
		goto err_put;
	}

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	/* Send mailbox to hw */
	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
				HNS_ROCE_CMD_CREATE_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
			  hr_cq->cqn, ret);
		goto err_xa;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;

	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	return ret;
}

static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
				HNS_ROCE_CMD_DESTROY_CQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase(&cq_table->array, hr_cq->cqn);

	/* Wait for any interrupt handling in progress to finish */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* Drop our reference and wait until all others are released */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
}

static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);

	return ret;
}

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}
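
/*
 * Doorbell setup in alloc_cq_db() below falls into three cases:
 *  - a user-space CQ on hardware with record-doorbell support, where the
 *    response buffer is large enough to carry cap_flags: map the doorbell
 *    page provided by user space;
 *  - a kernel CQ on hardware with record-doorbell support: allocate a
 *    kernel doorbell record and zero its consumer index;
 *  - otherwise a kernel CQ falls back to the hardware doorbell register
 *    derived from the device's UAR index.
 */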

static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata, unsigned long addr,
		       struct hns_roce_ib_create_cq_resp *resp)
{
	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
	struct hns_roce_ucontext *uctx;
	int err;

	if (udata) {
		if (has_db &&
		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
			uctx = rdma_udata_to_drv_context(udata,
					struct hns_roce_ucontext, ibucontext);
			err = hns_roce_db_map_user(uctx, udata, addr,
						   &hr_cq->db);
			if (err)
				return err;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
	} else {
		if (has_db) {
			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (err)
				return err;
			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
				 DB_REG_OFFSET * hr_dev->priv_uar.index;
	}

	return 0;
}

static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx;

	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
		return;

	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct hns_roce_ucontext,
						 ibucontext);
		hns_roce_db_unmap_user(uctx, &hr_cq->db);
	} else {
		hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
			 struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	/* An older user-space library may not pass cqe_size in the command;
	 * fall back to the default CQE size in that case.
	 */
	if (udata) {
		if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
			hr_cq->cqe_size = ucmd->cqe_size;
		else
			hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
	} else {
		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
	}
}
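
/*
 * Note on CQ depth handling in hns_roce_create_cq() below: the requested
 * depth is clamped to at least caps.min_cqes and rounded up to a power of
 * two, and ib_cq.cqe stores depth - 1 so it can serve as a CQE index mask.
 * For example, a request for 100 entries becomes a depth of 128 with
 * ib_cq.cqe == 127.
 */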

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_cq ucmd = {};
	int vector = attr->comp_vector;
	u32 cq_entries = attr->cqe;
	int ret;

	if (attr->flags)
		return -EOPNOTSUPP;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
			  cq_entries, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
			  vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;
	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata,
					 min(udata->inlen, sizeof(ucmd)));
		if (ret) {
			ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	set_cqe_size(hr_cq, udata, &ucmd);

	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
		return ret;
	}

	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
		goto err_cq_buf;
	}

	ret = alloc_cqn(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
		goto err_cq_db;
	}

	ret = alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc CQ context, ret = %d.\n", ret);
		goto err_cqn;
	}

	/*
	 * For a CQ created by kernel space, the tptr value should be
	 * initialized to zero; for a CQ created by user space, zeroing
	 * tptr here would cause synchronization problems, so user space
	 * initializes it instead.
	 */
	if (!udata && hr_cq->tptr_addr)
		*hr_cq->tptr_addr = 0;

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_cqc;
	}

	return 0;

err_cqc:
	free_cqc(hr_dev, hr_cq);
err_cqn:
	free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
	free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
	free_cq_buf(hr_dev, hr_cq);
	return ret;
}

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	if (hr_dev->hw->destroy_cq)
		hr_dev->hw->destroy_cq(ib_cq, udata);

	free_cqc(hr_dev, hr_cq);
	free_cqn(hr_dev, hr_cq->cqn);
	free_cq_db(hr_dev, hr_cq, udata);
	free_cq_buf(hr_dev, hr_cq);

	return 0;
}

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	atomic_inc(&hr_cq->refcount);

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}

void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&cq_table->bank_mutex);
	xa_init(&cq_table->array);

	reserved_from_bot = hr_dev->caps.reserved_cqs;

	/* Reserved CQNs are spread across the banks by their low bits;
	 * raising a bank's min keeps the IDA from handing them out.
	 */
	for (i = 0; i < reserved_from_bot; i++) {
		cq_table->bank[get_cq_bankid(i)].inuse++;
		cq_table->bank[get_cq_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		ida_init(&cq_table->bank[i].ida);
		cq_table->bank[i].max = hr_dev->caps.num_cqs /
					HNS_ROCE_CQ_BANK_NUM - 1;
	}
}
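
/*
 * Worked example for the reservation in hns_roce_init_cq_table() above
 * (assuming HNS_ROCE_CQ_BANK_NUM == 4, which matches the 2-bit bank mask):
 * with reserved_cqs == 6, CQNs 0..5 land in banks 0, 1, 2, 3, 0, 1, so
 * banks 0 and 1 start with min == 2 while banks 2 and 3 start with
 * min == 1.
 */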

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
		ida_destroy(&hr_dev->cq_table.bank[i].ida);
}