/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
EXPORT_SYMBOL_GPL(key_to_hw_index);

static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
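
/*
 * Counterpart of hns_roce_sw2hw_mpt(): hand the MPT entry at @mpt_index
 * back to software ownership.  A NULL mailbox is allowed here; in that
 * case nothing is read back from the hardware, and the "!mailbox" value
 * is passed as the op modifier to tell the firmware so.
 */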
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);

/*
 * Allocate 2^order contiguous MTT segments from the buddy allocator:
 * find the smallest free block of at least the requested order, split
 * it down to @order and return the first segment index in *seg.
 */
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_hem_table *table;
	struct hns_roce_buddy *buddy;
	int ret;

	switch (mtt_type) {
	case MTT_TYPE_WQE:
		buddy = &mr_table->mtt_buddy;
		table = &mr_table->mtt_table;
		break;
	case MTT_TYPE_CQE:
		buddy = &mr_table->mtt_cqe_buddy;
		table = &mr_table->mtt_cqe_table;
		break;
	case MTT_TYPE_SRQWQE:
		buddy = &mr_table->mtt_srqwqe_buddy;
		table = &mr_table->mtt_srqwqe_table;
		break;
	case MTT_TYPE_IDX:
		buddy = &mr_table->mtt_idx_buddy;
		table = &mr_table->mtt_idx_table;
		break;
	default:
		dev_err(hr_dev->dev, "Unsupported MTT table type: %d\n",
			mtt_type);
		return -EINVAL;
	}

	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(buddy, *seg, order);
		return -1;
	}

	return 0;
}
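
/*
 * An MTT describes the page list of a WQE/CQE/SRQ WQE/IDX buffer.  Each
 * buddy segment holds HNS_ROCE_MTT_ENTRY_PER_SEG entries, so the order
 * computed below is the log2 of the number of segments needed for
 * @npages entries.  npages == 0 denotes a DMA (whole address space)
 * registration, which needs no segments at all.
 */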
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret;
	int i;

	/* Page num is zero, which corresponds to DMA memory registration */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: if page_shift is zero, this is a FAST memory registration */
	mtt->page_shift = page_shift;

	/* Compute the number of MTT entries necessary */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate MTT entry */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
				       mtt->mtt_type);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_CQE:
		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_SRQWQE:
		hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_IDX:
		hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	default:
		dev_err(hr_dev->dev,
			"Unsupported mtt type %d, clean mtt failed\n",
			mtt->mtt_type);
		break;
	}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);

static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr, int err_loop_index,
			       int loop_i, int loop_j)
{
	struct device *dev = hr_dev->dev;
	u32 mhop_num;
	u32 pbl_bt_sz;
	u64 bt_idx;
	int i, j;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	i = loop_i;
	if (mhop_num == 3 && err_loop_index == 2) {
		for (; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				if (i == loop_i && j >= loop_j)
					break;

				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 3 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 2 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
	} else {
		dev_warn(dev, "not supported: mhop_num=%d, err_loop_index=%d.",
			 mhop_num, err_loop_index);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
	mr->pbl_bt_l0 = NULL;
	mr->pbl_l0_dma_addr = 0;
}
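
/*
 * With multi-hop addressing the PBL is organised as a tree of base
 * address tables (BTs): an L0 BT whose entries point to L1 BTs and,
 * when pbl_hop_num == 3, L1 entries that point to L2 BTs.  The leaf
 * tables hold the page addresses of the registered memory.  Each BT is
 * one pbl_ba_pg_sz page and therefore holds pbl_bt_sz / 8 64-bit
 * entries.  On a partial-allocation failure, hns_roce_loop_free()
 * above unwinds whatever the loops below have built so far.
 */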
/* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int mr_alloc_done = 0;
	int npages_allocated;
	int i = 0, j = 0;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 pbl_last_bt_num;
	u64 pbl_bt_cnt = 0;
	u64 bt_idx;
	u64 size;

	mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	/* hop_num = 1 */
	if (mhop_num == 1) {
		if (npages > pbl_bt_sz / 8) {
			dev_err(dev, "npages %d is larger than buf_pg_sz!",
				npages);
			return -EINVAL;
		}
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;

		mr->pbl_size = npages;
		mr->pbl_ba = mr->pbl_dma_addr;
		mr->pbl_hop_num = mhop_num;
		mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
		mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
		return 0;
	}

	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
				      sizeof(*mr->pbl_l1_dma_addr),
				      GFP_KERNEL);
	if (!mr->pbl_l1_dma_addr)
		return -ENOMEM;

	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
				GFP_KERNEL);
	if (!mr->pbl_bt_l1)
		goto err_kcalloc_bt_l1;

	if (mhop_num == 3) {
		mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
					      sizeof(*mr->pbl_l2_dma_addr),
					      GFP_KERNEL);
		if (!mr->pbl_l2_dma_addr)
			goto err_kcalloc_l2_dma;

		mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
					sizeof(*mr->pbl_bt_l2),
					GFP_KERNEL);
		if (!mr->pbl_bt_l2)
			goto err_kcalloc_bt_l2;
	}

	/* alloc L0 BT */
	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
					   &(mr->pbl_l0_dma_addr),
					   GFP_KERNEL);
	if (!mr->pbl_bt_l0)
		goto err_dma_alloc_l0;

	if (mhop_num == 2) {
		/* alloc L1 BT */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
				size = pbl_bt_sz;
			} else {
				npages_allocated = i * (pbl_bt_sz / 8);
				size = (npages - npages_allocated) * 8;
			}
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
						&(mr->pbl_l1_dma_addr[i]),
						GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			pbl_bt_cnt++;
			if (pbl_bt_cnt >= pbl_last_bt_num)
				break;
		}
	} else if (mhop_num == 3) {
		/* alloc L1, L2 BT */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
						&(mr->pbl_l1_dma_addr[i]),
						GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;

				if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
					size = pbl_bt_sz;
				} else {
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);
					size = (npages - npages_allocated) * 8;
				}
				mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
						dev, size,
						&(mr->pbl_l2_dma_addr[bt_idx]),
						GFP_KERNEL);
				if (!mr->pbl_bt_l2[bt_idx]) {
					hns_roce_loop_free(hr_dev, mr, 2, i, j);
					goto err_dma_alloc_l0;
				}

				*(mr->pbl_bt_l1[i] + j) =
						mr->pbl_l2_dma_addr[bt_idx];

				pbl_bt_cnt++;
				if (pbl_bt_cnt >= pbl_last_bt_num) {
					mr_alloc_done = 1;
					break;
				}
			}

			if (mr_alloc_done)
				break;
		}
	}

	mr->l0_chunk_last_num = i + 1;
	if (mhop_num == 3)
		mr->l1_chunk_last_num = j + 1;

	mr->pbl_size = npages;
	mr->pbl_ba = mr->pbl_l0_dma_addr;
	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	return 0;

err_dma_alloc_l0:
	kfree(mr->pbl_bt_l2);
	mr->pbl_bt_l2 = NULL;

err_kcalloc_bt_l2:
	kfree(mr->pbl_l2_dma_addr);
	mr->pbl_l2_dma_addr = NULL;

err_kcalloc_l2_dma:
	kfree(mr->pbl_bt_l1);
	mr->pbl_bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_l1_dma_addr = NULL;

	return -ENOMEM;
}
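
/*
 * Reserve an MPT index from the mtpt bitmap, derive the key from it and
 * fill in the software MR state.  A size of ~0ULL denotes a DMA MR that
 * covers the whole address space and therefore needs no PBL; otherwise
 * the PBL is either a single flat buffer (pbl_hop_num == 0) or the
 * multi-hop structure built by hns_roce_mhop_alloc().
 */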
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	unsigned long index = 0;
	int ret = 0;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* PD num */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ull) {
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
		/* PBL multi-hop addressing parameters */
		mr->pbl_bt_l2 = NULL;
		mr->pbl_bt_l1 = NULL;
		mr->pbl_bt_l0 = NULL;
		mr->pbl_l2_dma_addr = NULL;
		mr->pbl_l1_dma_addr = NULL;
		mr->pbl_l0_dma_addr = 0;
	} else {
		if (!hr_dev->caps.pbl_hop_num) {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf)
				return -ENOMEM;
		} else {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
		}
	}

	return ret;
}
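
/*
 * Tear down the multi-hop PBL built by hns_roce_mhop_alloc().  Only the
 * L1/L2 tables that were actually allocated are freed, using the
 * l0_chunk_last_num/l1_chunk_last_num counters recorded at allocation
 * time; the last chunk may be shorter than a full BT page.
 */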
static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages_allocated;
	int npages;
	int i, j;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 bt_idx;

	npages = mr->pbl_size;
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return;

	/* hop_num = 1 */
	if (mhop_num == 1) {
		dma_free_coherent(dev, (unsigned int)(npages * 8),
				  mr->pbl_buf, mr->pbl_dma_addr);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
			  mr->pbl_l0_dma_addr);

	if (mhop_num == 2) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			if (i == mr->l0_chunk_last_num - 1) {
				npages_allocated = i * (pbl_bt_sz / 8);

				dma_free_coherent(dev,
					(npages - npages_allocated) * 8,
					mr->pbl_bt_l1[i],
					mr->pbl_l1_dma_addr[i]);

				break;
			}

			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
		}
	} else if (mhop_num == 3) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * (pbl_bt_sz / 8) + j;

				if ((i == mr->l0_chunk_last_num - 1)
				    && j == mr->l1_chunk_last_num - 1) {
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);

					dma_free_coherent(dev,
						(npages - npages_allocated) * 8,
						mr->pbl_bt_l2[bt_idx],
						mr->pbl_l2_dma_addr[bt_idx]);

					break;
				}

				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	}

	kfree(mr->pbl_bt_l1);
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_bt_l1 = NULL;
	mr->pbl_l1_dma_addr = NULL;
	if (mhop_num == 3) {
		kfree(mr->pbl_bt_l2);
		kfree(mr->pbl_l2_dma_addr);
		mr->pbl_bt_l2 = NULL;
		mr->pbl_l2_dma_addr = NULL;
	}
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		if (mr->type == MR_TYPE_MR)
			npages = ib_umem_page_count(mr->umem);

		if (!hr_dev->caps.pbl_hop_num)
			dma_free_coherent(dev, (unsigned int)(npages * 8),
					  mr->pbl_buf, mr->pbl_dma_addr);
		else
			hns_roce_mhop_free(hr_dev, mr);
	}

	if (mr->enabled)
		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mr->key));

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "Write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}
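
/*
 * Write one chunk of page addresses into the MTT.  A chunk must start
 * on a segment boundary and may not cross a BT page, so callers split
 * larger updates into pieces of at most bt_page_size / sizeof(u64)
 * entries (see hns_roce_write_mtt() below).
 */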
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	struct hns_roce_hem_table *table;
	dma_addr_t dma_handle;
	__le64 *mtts;
	u32 s = start_index * sizeof(u64);
	u32 bt_page_size;
	u32 i;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		table = &hr_dev->mr_table.mtt_table;
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_CQE:
		table = &hr_dev->mr_table.mtt_cqe_table;
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_SRQWQE:
		table = &hr_dev->mr_table.mtt_srqwqe_table;
		bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_IDX:
		table = &hr_dev->mr_table.mtt_idx_table;
		bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	/* All MTTs must fit in the same page */
	if (start_index / (bt_page_size / sizeof(u64)) !=
	    (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(hr_dev, table,
				   mtt->first_seg +
				   s / hr_dev->caps.mtt_entry_sz,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr, low 12 bits : 0 */
	for (i = 0; i < npages; ++i) {
		if (!hr_dev->caps.mtt_hop_num)
			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
		else
			mtts[i] = cpu_to_le64(page_list[i]);
	}

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;
	u32 bt_page_size;

	if (mtt->order < 0)
		return -EINVAL;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_CQE:
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_SRQWQE:
		bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_IDX:
		bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
		break;
	default:
		dev_err(hr_dev->dev,
			"Unsupported mtt type %d, write mtt failed\n",
			mtt->mtt_type);
		return -EINVAL;
	}

	while (npages > 0) {
		chunk = min_t(int, bt_page_size / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u64 *page_list;
	int ret;
	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}
	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}
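
/*
 * Set up the MR management state: the MPT index bitmap plus one buddy
 * allocator per MTT table (WQE always; CQE only when it uses multi-hop
 * addressing; SRQ WQE and IDX only when the device provides segments
 * for them).  hns_roce_cleanup_mr_table() releases them again.
 */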
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
					  ilog2(hr_dev->caps.num_cqe_segs));
		if (ret)
			goto err_buddy_cqe;
	}

	if (hr_dev->caps.num_srqwqe_segs) {
		ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
					  ilog2(hr_dev->caps.num_srqwqe_segs));
		if (ret)
			goto err_buddy_srqwqe;
	}

	if (hr_dev->caps.num_idx_segs) {
		ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
					  ilog2(hr_dev->caps.num_idx_segs));
		if (ret)
			goto err_buddy_idx;
	}

	return 0;

err_buddy_idx:
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);

err_buddy_srqwqe:
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);

err_buddy_cqe:
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (hr_dev->caps.num_idx_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_mr *mr;
	int ret;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}
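
/*
 * Translate a umem's scatterlist into MTT entries.  Page addresses are
 * collected into a bt_page_size scratch buffer and flushed with
 * hns_roce_write_mtt() whenever the buffer fills, so that each flush
 * stays within a single BT page.
 */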
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct device *dev = hr_dev->dev;
	struct scatterlist *sg;
	unsigned int order;
	int i, k, entry;
	int npage = 0;
	int ret = 0;
	int len;
	u64 page_addr;
	u64 *pages;
	u32 bt_page_size;
	u32 n;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		order = hr_dev->caps.mtt_ba_pg_sz;
		break;
	case MTT_TYPE_CQE:
		order = hr_dev->caps.cqe_ba_pg_sz;
		break;
	case MTT_TYPE_SRQWQE:
		order = hr_dev->caps.srqwqe_ba_pg_sz;
		break;
	case MTT_TYPE_IDX:
		order = hr_dev->caps.idx_ba_pg_sz;
		break;
	default:
		dev_err(dev, "Unsupported mtt type %d, write mtt failed\n",
			mtt->mtt_type);
		return -EINVAL;
	}

	bt_page_size = 1 << (order + PAGE_SHIFT);

	pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr =
				sg_dma_address(sg) + (k << umem->page_shift);
			if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
				if (page_addr & ((1 << mtt->page_shift) - 1)) {
					dev_err(dev,
						"page_addr 0x%llx is not aligned to page_shift %d!\n",
						page_addr, mtt->page_shift);
					ret = -EINVAL;
					goto out;
				}
				pages[i++] = page_addr;
			}
			npage++;
			if (i == bt_page_size / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_pages((unsigned long) pages, order);
	return ret;
}

static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
				     struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i = 0, j = 0, k;
	int entry;
	int len;
	u64 page_addr;
	u32 pbl_bt_sz;

	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr = sg_dma_address(sg) +
				    (k << umem->page_shift);

			if (!hr_dev->caps.pbl_hop_num) {
				mr->pbl_buf[i++] = page_addr >> 12;
			} else if (hr_dev->caps.pbl_hop_num == 1) {
				mr->pbl_buf[i++] = page_addr;
			} else {
				if (hr_dev->caps.pbl_hop_num == 2)
					mr->pbl_bt_l1[i][j] = page_addr;
				else if (hr_dev->caps.pbl_hop_num == 3)
					mr->pbl_bt_l2[i][j] = page_addr;

				j++;
				if (j >= (pbl_bt_sz / 8)) {
					i++;
					j = 0;
				}
			}
		}
	}

	/* Memory barrier */
	mb();

	return 0;
}
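
/*
 * Register a user memory region.  The page count is bounded by
 * HNS_ROCE_MAX_MTPT_PBL_NUM when a flat PBL is used, or by
 * (pbl_bt_sz / 8) ^ pbl_hop_num entries with multi-hop addressing,
 * before the MR key, PBL and MPT entry are set up.
 */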
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int bt_size;
	int ret;
	int n;
	int i;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);

	if (!hr_dev->caps.pbl_hop_num) {
		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
			dev_err(dev,
				" MR len %lld err. MR is limited to 4G at most!\n",
				length);
			ret = -EINVAL;
			goto err_umem;
		}
	} else {
		u64 pbl_size = 1;

		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
			pbl_size *= bt_size;
		if (n > pbl_size) {
			dev_err(dev,
				" MR len %lld err. MR page num is limited to %lld!\n",
				length, pbl_size);
			ret = -EINVAL;
			goto err_umem;
		}
	}

	mr->type = MR_TYPE_MR;

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}
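
/*
 * Re-register an existing MR: the current MPT entry is read back with
 * QUERY_MPT, taken out of hardware ownership with HW2SW_MPT, optionally
 * given a new PD, access flags or translation (which rebuilds the PBL
 * from a fresh umem), and finally handed back to hardware with
 * SW2HW_MPT.
 */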
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int npages;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		if (mr->size != ~0ULL) {
			npages = ib_umem_page_count(mr->umem);

			if (hr_dev->caps.pbl_hop_num)
				hns_roce_mhop_free(hr_dev, mr);
			else
				dma_free_coherent(dev, npages * 8, mr->pbl_buf,
						  mr->pbl_dma_addr);
		}
		ib_umem_release(mr->umem);

		mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
				       mr_access_flags, 0);
		if (IS_ERR(mr->umem)) {
			ret = PTR_ERR(mr->umem);
			mr->umem = NULL;
			goto free_cmd_mbox;
		}
		npages = ib_umem_page_count(mr->umem);

		if (hr_dev->caps.pbl_hop_num) {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
			if (ret)
				goto release_umem;
		} else {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf) {
				ret = -ENOMEM;
				goto release_umem;
			}
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		if (flags & IB_MR_REREG_TRANS)
			goto release_umem;
		else
			goto free_cmd_mbox;
	}

	if (flags & IB_MR_REREG_TRANS) {
		ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
		if (ret) {
			if (mr->size != ~0ULL) {
				npages = ib_umem_page_count(mr->umem);

				if (hr_dev->caps.pbl_hop_num)
					hns_roce_mhop_free(hr_dev, mr);
				else
					dma_free_coherent(dev, npages * 8,
							  mr->pbl_buf,
							  mr->pbl_dma_addr);
			}

			goto release_umem;
		}
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto release_umem;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

release_umem:
	ib_umem_release(mr->umem);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		if (mr->umem)
			ib_umem_release(mr->umem);

		kfree(mr);
	}

	return ret;
}

struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	u64 length;
	u32 page_size;
	int ret;

	page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
	length = max_num_sg * page_size;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
				0, max_num_sg, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);

	return 0;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}
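
/*
 * Memory windows reuse the MPT machinery: hns_roce_mw_enable() claims a
 * HEM MPT entry, fills it through the hardware-specific mw_write_mtpt()
 * hook and activates it with SW2HW_MPT, mirroring hns_roce_mr_enable().
 */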
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
				struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
	struct hns_roce_mw *mw;
	unsigned long index = 0;
	int ret;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	/* Allocate a key for mw from bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		goto err_bitmap;

	mw->rkey = hw_index_to_key(index);

	mw->ibmw.rkey = mw->rkey;
	mw->ibmw.type = type;
	mw->pdn = to_hr_pd(ib_pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return &mw->ibmw;

err_mw:
	hns_roce_mw_free(hr_dev, mw);

err_bitmap:
	kfree(mw);

	return ERR_PTR(ret);
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	kfree(mw);

	return 0;
}