1 /* 2 * Copyright (c) 2016 Hisilicon Limited. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/platform_device.h> 35 #include <linux/vmalloc.h> 36 #include <rdma/ib_umem.h> 37 #include "hns_roce_device.h" 38 #include "hns_roce_cmd.h" 39 #include "hns_roce_hem.h" 40 41 static u32 hw_index_to_key(unsigned long ind) 42 { 43 return (u32)(ind >> 24) | (ind << 8); 44 } 45 46 unsigned long key_to_hw_index(u32 key) 47 { 48 return (key << 24) | (key >> 8); 49 } 50 EXPORT_SYMBOL_GPL(key_to_hw_index); 51 52 static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev, 53 struct hns_roce_cmd_mailbox *mailbox, 54 unsigned long mpt_index) 55 { 56 return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0, 57 HNS_ROCE_CMD_SW2HW_MPT, 58 HNS_ROCE_CMD_TIMEOUT_MSECS); 59 } 60 61 int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev, 62 struct hns_roce_cmd_mailbox *mailbox, 63 unsigned long mpt_index) 64 { 65 return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0, 66 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT, 67 HNS_ROCE_CMD_TIMEOUT_MSECS); 68 } 69 EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt); 70 71 static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order, 72 unsigned long *seg) 73 { 74 int o; 75 u32 m; 76 77 spin_lock(&buddy->lock); 78 79 for (o = order; o <= buddy->max_order; ++o) { 80 if (buddy->num_free[o]) { 81 m = 1 << (buddy->max_order - o); 82 *seg = find_first_bit(buddy->bits[o], m); 83 if (*seg < m) 84 goto found; 85 } 86 } 87 spin_unlock(&buddy->lock); 88 return -1; 89 90 found: 91 clear_bit(*seg, buddy->bits[o]); 92 --buddy->num_free[o]; 93 94 while (o > order) { 95 --o; 96 *seg <<= 1; 97 set_bit(*seg ^ 1, buddy->bits[o]); 98 ++buddy->num_free[o]; 99 } 100 101 spin_unlock(&buddy->lock); 102 103 *seg <<= order; 104 return 0; 105 } 106 107 static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg, 108 int order) 109 { 110 seg >>= order; 111 112 spin_lock(&buddy->lock); 113 114 while (test_bit(seg ^ 1, buddy->bits[order])) { 115 clear_bit(seg ^ 1, buddy->bits[order]); 116 --buddy->num_free[order]; 117 seg >>= 1; 118 ++order; 119 } 120 121 set_bit(seg, buddy->bits[order]); 122 ++buddy->num_free[order]; 123 124 spin_unlock(&buddy->lock); 125 } 126 127 static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order) 128 { 129 int i, s; 130 131 buddy->max_order = max_order; 132 spin_lock_init(&buddy->lock); 133 buddy->bits = kcalloc(buddy->max_order + 1, 134 sizeof(*buddy->bits), 135 GFP_KERNEL); 136 buddy->num_free = kcalloc(buddy->max_order + 1, 137 sizeof(*buddy->num_free), 138 GFP_KERNEL); 139 if (!buddy->bits || !buddy->num_free) 140 goto err_out; 141 142 for (i = 0; i <= buddy->max_order; ++i) { 143 s = BITS_TO_LONGS(1 << (buddy->max_order - i)); 144 buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL | 145 __GFP_NOWARN); 146 if (!buddy->bits[i]) { 147 buddy->bits[i] = vzalloc(array_size(s, sizeof(long))); 148 if (!buddy->bits[i]) 149 goto err_out_free; 150 } 151 } 152 153 set_bit(0, buddy->bits[buddy->max_order]); 154 buddy->num_free[buddy->max_order] = 1; 155 156 return 0; 157 158 err_out_free: 159 for (i = 0; i <= buddy->max_order; ++i) 160 kvfree(buddy->bits[i]); 161 162 err_out: 163 kfree(buddy->bits); 164 kfree(buddy->num_free); 165 return -ENOMEM; 166 } 167 168 static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy) 169 { 170 int i; 171 172 for (i = 0; i <= buddy->max_order; ++i) 173 kvfree(buddy->bits[i]); 174 175 kfree(buddy->bits); 176 kfree(buddy->num_free); 177 } 178 179 static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order, 180 unsigned long *seg, u32 mtt_type) 181 { 182 struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 183 struct hns_roce_hem_table *table; 184 struct hns_roce_buddy *buddy; 185 int ret; 186 187 switch (mtt_type) { 188 case MTT_TYPE_WQE: 189 buddy = &mr_table->mtt_buddy; 190 table = &mr_table->mtt_table; 191 break; 192 case MTT_TYPE_CQE: 193 buddy = &mr_table->mtt_cqe_buddy; 194 table = &mr_table->mtt_cqe_table; 195 break; 196 case MTT_TYPE_SRQWQE: 197 buddy = &mr_table->mtt_srqwqe_buddy; 198 table = &mr_table->mtt_srqwqe_table; 199 break; 200 case MTT_TYPE_IDX: 201 buddy = &mr_table->mtt_idx_buddy; 202 table = &mr_table->mtt_idx_table; 203 break; 204 default: 205 dev_err(hr_dev->dev, "Unsupport MTT table type: %d\n", 206 mtt_type); 207 return -EINVAL; 208 } 209 210 ret = hns_roce_buddy_alloc(buddy, order, seg); 211 if (ret == -1) 212 return -1; 213 214 if (hns_roce_table_get_range(hr_dev, table, *seg, 215 *seg + (1 << order) - 1)) { 216 hns_roce_buddy_free(buddy, *seg, order); 217 return -1; 218 } 219 220 return 0; 221 } 222 223 int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, 224 struct hns_roce_mtt *mtt) 225 { 226 int ret; 227 int i; 228 229 /* Page num is zero, correspond to DMA memory register */ 230 if (!npages) { 231 mtt->order = -1; 232 mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT; 233 return 0; 234 } 235 236 /* Note: if page_shift is zero, FAST memory register */ 237 mtt->page_shift = page_shift; 238 239 /* Compute MTT entry necessary */ 240 for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages; 241 i <<= 1) 242 ++mtt->order; 243 244 /* Allocate MTT entry */ 245 ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg, 246 mtt->mtt_type); 247 if (ret == -1) 248 return -ENOMEM; 249 250 return 0; 251 } 252 253 void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt) 254 { 255 struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 256 257 if (mtt->order < 0) 258 return; 259 260 switch (mtt->mtt_type) { 261 case MTT_TYPE_WQE: 262 hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, 263 mtt->order); 264 hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, 265 mtt->first_seg, 266 mtt->first_seg + (1 << mtt->order) - 1); 267 break; 268 case MTT_TYPE_CQE: 269 hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg, 270 mtt->order); 271 hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table, 272 mtt->first_seg, 273 mtt->first_seg + (1 << mtt->order) - 1); 274 break; 275 case MTT_TYPE_SRQWQE: 276 hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg, 277 mtt->order); 278 hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table, 279 mtt->first_seg, 280 mtt->first_seg + (1 << mtt->order) - 1); 281 break; 282 case MTT_TYPE_IDX: 283 hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg, 284 mtt->order); 285 hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table, 286 mtt->first_seg, 287 mtt->first_seg + (1 << mtt->order) - 1); 288 break; 289 default: 290 dev_err(hr_dev->dev, 291 "Unsupport mtt type %d, clean mtt failed\n", 292 mtt->mtt_type); 293 break; 294 } 295 } 296 EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup); 297 298 static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, 299 struct hns_roce_mr *mr, int err_loop_index, 300 int loop_i, int loop_j) 301 { 302 struct device *dev = hr_dev->dev; 303 u32 mhop_num; 304 u32 pbl_bt_sz; 305 u64 bt_idx; 306 int i, j; 307 308 pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); 309 mhop_num = hr_dev->caps.pbl_hop_num; 310 311 i = loop_i; 312 if (mhop_num == 3 && err_loop_index == 2) { 313 for (; i >= 0; i--) { 314 dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], 315 mr->pbl_l1_dma_addr[i]); 316 317 for (j = 0; j < pbl_bt_sz / 8; j++) { 318 if (i == loop_i && j >= loop_j) 319 break; 320 321 bt_idx = i * pbl_bt_sz / 8 + j; 322 dma_free_coherent(dev, pbl_bt_sz, 323 mr->pbl_bt_l2[bt_idx], 324 mr->pbl_l2_dma_addr[bt_idx]); 325 } 326 } 327 } else if (mhop_num == 3 && err_loop_index == 1) { 328 for (i -= 1; i >= 0; i--) { 329 dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], 330 mr->pbl_l1_dma_addr[i]); 331 332 for (j = 0; j < pbl_bt_sz / 8; j++) { 333 bt_idx = i * pbl_bt_sz / 8 + j; 334 dma_free_coherent(dev, pbl_bt_sz, 335 mr->pbl_bt_l2[bt_idx], 336 mr->pbl_l2_dma_addr[bt_idx]); 337 } 338 } 339 } else if (mhop_num == 2 && err_loop_index == 1) { 340 for (i -= 1; i >= 0; i--) 341 dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], 342 mr->pbl_l1_dma_addr[i]); 343 } else { 344 dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.", 345 mhop_num, err_loop_index); 346 return; 347 } 348 349 dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr); 350 mr->pbl_bt_l0 = NULL; 351 mr->pbl_l0_dma_addr = 0; 352 } 353 354 /* PBL multi hop addressing */ 355 static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, 356 struct hns_roce_mr *mr) 357 { 358 struct device *dev = hr_dev->dev; 359 int mr_alloc_done = 0; 360 int npages_allocated; 361 int i = 0, j = 0; 362 u32 pbl_bt_sz; 363 u32 mhop_num; 364 u64 pbl_last_bt_num; 365 u64 pbl_bt_cnt = 0; 366 u64 bt_idx; 367 u64 size; 368 369 mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num); 370 pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); 371 pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); 372 373 if (mhop_num == HNS_ROCE_HOP_NUM_0) 374 return 0; 375 376 /* hop_num = 1 */ 377 if (mhop_num == 1) { 378 if (npages > pbl_bt_sz / 8) { 379 dev_err(dev, "npages %d is larger than buf_pg_sz!", 380 npages); 381 return -EINVAL; 382 } 383 mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, 384 &(mr->pbl_dma_addr), 385 GFP_KERNEL); 386 if (!mr->pbl_buf) 387 return -ENOMEM; 388 389 mr->pbl_size = npages; 390 mr->pbl_ba = mr->pbl_dma_addr; 391 mr->pbl_hop_num = mhop_num; 392 mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; 393 mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; 394 return 0; 395 } 396 397 mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8, 398 sizeof(*mr->pbl_l1_dma_addr), 399 GFP_KERNEL); 400 if (!mr->pbl_l1_dma_addr) 401 return -ENOMEM; 402 403 mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1), 404 GFP_KERNEL); 405 if (!mr->pbl_bt_l1) 406 goto err_kcalloc_bt_l1; 407 408 if (mhop_num == 3) { 409 mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num, 410 sizeof(*mr->pbl_l2_dma_addr), 411 GFP_KERNEL); 412 if (!mr->pbl_l2_dma_addr) 413 goto err_kcalloc_l2_dma; 414 415 mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, 416 sizeof(*mr->pbl_bt_l2), 417 GFP_KERNEL); 418 if (!mr->pbl_bt_l2) 419 goto err_kcalloc_bt_l2; 420 } 421 422 /* alloc L0 BT */ 423 mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz, 424 &(mr->pbl_l0_dma_addr), 425 GFP_KERNEL); 426 if (!mr->pbl_bt_l0) 427 goto err_dma_alloc_l0; 428 429 if (mhop_num == 2) { 430 /* alloc L1 BT */ 431 for (i = 0; i < pbl_bt_sz / 8; i++) { 432 if (pbl_bt_cnt + 1 < pbl_last_bt_num) { 433 size = pbl_bt_sz; 434 } else { 435 npages_allocated = i * (pbl_bt_sz / 8); 436 size = (npages - npages_allocated) * 8; 437 } 438 mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size, 439 &(mr->pbl_l1_dma_addr[i]), 440 GFP_KERNEL); 441 if (!mr->pbl_bt_l1[i]) { 442 hns_roce_loop_free(hr_dev, mr, 1, i, 0); 443 goto err_dma_alloc_l0; 444 } 445 446 *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; 447 448 pbl_bt_cnt++; 449 if (pbl_bt_cnt >= pbl_last_bt_num) 450 break; 451 } 452 } else if (mhop_num == 3) { 453 /* alloc L1, L2 BT */ 454 for (i = 0; i < pbl_bt_sz / 8; i++) { 455 mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, 456 &(mr->pbl_l1_dma_addr[i]), 457 GFP_KERNEL); 458 if (!mr->pbl_bt_l1[i]) { 459 hns_roce_loop_free(hr_dev, mr, 1, i, 0); 460 goto err_dma_alloc_l0; 461 } 462 463 *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; 464 465 for (j = 0; j < pbl_bt_sz / 8; j++) { 466 bt_idx = i * pbl_bt_sz / 8 + j; 467 468 if (pbl_bt_cnt + 1 < pbl_last_bt_num) { 469 size = pbl_bt_sz; 470 } else { 471 npages_allocated = bt_idx * 472 (pbl_bt_sz / 8); 473 size = (npages - npages_allocated) * 8; 474 } 475 mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent( 476 dev, size, 477 &(mr->pbl_l2_dma_addr[bt_idx]), 478 GFP_KERNEL); 479 if (!mr->pbl_bt_l2[bt_idx]) { 480 hns_roce_loop_free(hr_dev, mr, 2, i, j); 481 goto err_dma_alloc_l0; 482 } 483 484 *(mr->pbl_bt_l1[i] + j) = 485 mr->pbl_l2_dma_addr[bt_idx]; 486 487 pbl_bt_cnt++; 488 if (pbl_bt_cnt >= pbl_last_bt_num) { 489 mr_alloc_done = 1; 490 break; 491 } 492 } 493 494 if (mr_alloc_done) 495 break; 496 } 497 } 498 499 mr->l0_chunk_last_num = i + 1; 500 if (mhop_num == 3) 501 mr->l1_chunk_last_num = j + 1; 502 503 mr->pbl_size = npages; 504 mr->pbl_ba = mr->pbl_l0_dma_addr; 505 mr->pbl_hop_num = hr_dev->caps.pbl_hop_num; 506 mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; 507 mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; 508 509 return 0; 510 511 err_dma_alloc_l0: 512 kfree(mr->pbl_bt_l2); 513 mr->pbl_bt_l2 = NULL; 514 515 err_kcalloc_bt_l2: 516 kfree(mr->pbl_l2_dma_addr); 517 mr->pbl_l2_dma_addr = NULL; 518 519 err_kcalloc_l2_dma: 520 kfree(mr->pbl_bt_l1); 521 mr->pbl_bt_l1 = NULL; 522 523 err_kcalloc_bt_l1: 524 kfree(mr->pbl_l1_dma_addr); 525 mr->pbl_l1_dma_addr = NULL; 526 527 return -ENOMEM; 528 } 529 530 static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova, 531 u64 size, u32 access, int npages, 532 struct hns_roce_mr *mr) 533 { 534 struct device *dev = hr_dev->dev; 535 unsigned long index = 0; 536 int ret = 0; 537 538 /* Allocate a key for mr from mr_table */ 539 ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index); 540 if (ret == -1) 541 return -ENOMEM; 542 543 mr->iova = iova; /* MR va starting addr */ 544 mr->size = size; /* MR addr range */ 545 mr->pd = pd; /* MR num */ 546 mr->access = access; /* MR access permit */ 547 mr->enabled = 0; /* MR active status */ 548 mr->key = hw_index_to_key(index); /* MR key */ 549 550 if (size == ~0ull) { 551 mr->pbl_buf = NULL; 552 mr->pbl_dma_addr = 0; 553 /* PBL multi-hop addressing parameters */ 554 mr->pbl_bt_l2 = NULL; 555 mr->pbl_bt_l1 = NULL; 556 mr->pbl_bt_l0 = NULL; 557 mr->pbl_l2_dma_addr = NULL; 558 mr->pbl_l1_dma_addr = NULL; 559 mr->pbl_l0_dma_addr = 0; 560 } else { 561 if (!hr_dev->caps.pbl_hop_num) { 562 mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, 563 &(mr->pbl_dma_addr), 564 GFP_KERNEL); 565 if (!mr->pbl_buf) 566 return -ENOMEM; 567 } else { 568 ret = hns_roce_mhop_alloc(hr_dev, npages, mr); 569 } 570 } 571 572 return ret; 573 } 574 575 static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev, 576 struct hns_roce_mr *mr) 577 { 578 struct device *dev = hr_dev->dev; 579 int npages_allocated; 580 int npages; 581 int i, j; 582 u32 pbl_bt_sz; 583 u32 mhop_num; 584 u64 bt_idx; 585 586 npages = mr->pbl_size; 587 pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); 588 mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num; 589 590 if (mhop_num == HNS_ROCE_HOP_NUM_0) 591 return; 592 593 /* hop_num = 1 */ 594 if (mhop_num == 1) { 595 dma_free_coherent(dev, (unsigned int)(npages * 8), 596 mr->pbl_buf, mr->pbl_dma_addr); 597 return; 598 } 599 600 dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, 601 mr->pbl_l0_dma_addr); 602 603 if (mhop_num == 2) { 604 for (i = 0; i < mr->l0_chunk_last_num; i++) { 605 if (i == mr->l0_chunk_last_num - 1) { 606 npages_allocated = i * (pbl_bt_sz / 8); 607 608 dma_free_coherent(dev, 609 (npages - npages_allocated) * 8, 610 mr->pbl_bt_l1[i], 611 mr->pbl_l1_dma_addr[i]); 612 613 break; 614 } 615 616 dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], 617 mr->pbl_l1_dma_addr[i]); 618 } 619 } else if (mhop_num == 3) { 620 for (i = 0; i < mr->l0_chunk_last_num; i++) { 621 dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i], 622 mr->pbl_l1_dma_addr[i]); 623 624 for (j = 0; j < pbl_bt_sz / 8; j++) { 625 bt_idx = i * (pbl_bt_sz / 8) + j; 626 627 if ((i == mr->l0_chunk_last_num - 1) 628 && j == mr->l1_chunk_last_num - 1) { 629 npages_allocated = bt_idx * 630 (pbl_bt_sz / 8); 631 632 dma_free_coherent(dev, 633 (npages - npages_allocated) * 8, 634 mr->pbl_bt_l2[bt_idx], 635 mr->pbl_l2_dma_addr[bt_idx]); 636 637 break; 638 } 639 640 dma_free_coherent(dev, pbl_bt_sz, 641 mr->pbl_bt_l2[bt_idx], 642 mr->pbl_l2_dma_addr[bt_idx]); 643 } 644 } 645 } 646 647 kfree(mr->pbl_bt_l1); 648 kfree(mr->pbl_l1_dma_addr); 649 mr->pbl_bt_l1 = NULL; 650 mr->pbl_l1_dma_addr = NULL; 651 if (mhop_num == 3) { 652 kfree(mr->pbl_bt_l2); 653 kfree(mr->pbl_l2_dma_addr); 654 mr->pbl_bt_l2 = NULL; 655 mr->pbl_l2_dma_addr = NULL; 656 } 657 } 658 659 static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, 660 struct hns_roce_mr *mr) 661 { 662 struct device *dev = hr_dev->dev; 663 int npages = 0; 664 int ret; 665 666 if (mr->enabled) { 667 ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key) 668 & (hr_dev->caps.num_mtpts - 1)); 669 if (ret) 670 dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret); 671 } 672 673 if (mr->size != ~0ULL) { 674 if (mr->type == MR_TYPE_MR) 675 npages = ib_umem_page_count(mr->umem); 676 677 if (!hr_dev->caps.pbl_hop_num) 678 dma_free_coherent(dev, (unsigned int)(npages * 8), 679 mr->pbl_buf, mr->pbl_dma_addr); 680 else 681 hns_roce_mhop_free(hr_dev, mr); 682 } 683 684 if (mr->enabled) 685 hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, 686 key_to_hw_index(mr->key)); 687 688 hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, 689 key_to_hw_index(mr->key), BITMAP_NO_RR); 690 } 691 692 static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, 693 struct hns_roce_mr *mr) 694 { 695 int ret; 696 unsigned long mtpt_idx = key_to_hw_index(mr->key); 697 struct device *dev = hr_dev->dev; 698 struct hns_roce_cmd_mailbox *mailbox; 699 struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 700 701 /* Prepare HEM entry memory */ 702 ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx); 703 if (ret) 704 return ret; 705 706 /* Allocate mailbox memory */ 707 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 708 if (IS_ERR(mailbox)) { 709 ret = PTR_ERR(mailbox); 710 goto err_table; 711 } 712 713 if (mr->type != MR_TYPE_FRMR) 714 ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx); 715 else 716 ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr); 717 if (ret) { 718 dev_err(dev, "Write mtpt fail!\n"); 719 goto err_page; 720 } 721 722 ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, 723 mtpt_idx & (hr_dev->caps.num_mtpts - 1)); 724 if (ret) { 725 dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); 726 goto err_page; 727 } 728 729 mr->enabled = 1; 730 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 731 732 return 0; 733 734 err_page: 735 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 736 737 err_table: 738 hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx); 739 return ret; 740 } 741 742 static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, 743 struct hns_roce_mtt *mtt, u32 start_index, 744 u32 npages, u64 *page_list) 745 { 746 struct hns_roce_hem_table *table; 747 dma_addr_t dma_handle; 748 __le64 *mtts; 749 u32 bt_page_size; 750 u32 i; 751 752 switch (mtt->mtt_type) { 753 case MTT_TYPE_WQE: 754 table = &hr_dev->mr_table.mtt_table; 755 bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT); 756 break; 757 case MTT_TYPE_CQE: 758 table = &hr_dev->mr_table.mtt_cqe_table; 759 bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT); 760 break; 761 case MTT_TYPE_SRQWQE: 762 table = &hr_dev->mr_table.mtt_srqwqe_table; 763 bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT); 764 break; 765 case MTT_TYPE_IDX: 766 table = &hr_dev->mr_table.mtt_idx_table; 767 bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT); 768 break; 769 default: 770 return -EINVAL; 771 } 772 773 /* All MTTs must fit in the same page */ 774 if (start_index / (bt_page_size / sizeof(u64)) != 775 (start_index + npages - 1) / (bt_page_size / sizeof(u64))) 776 return -EINVAL; 777 778 if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1)) 779 return -EINVAL; 780 781 mtts = hns_roce_table_find(hr_dev, table, 782 mtt->first_seg + 783 start_index / HNS_ROCE_MTT_ENTRY_PER_SEG, 784 &dma_handle); 785 if (!mtts) 786 return -ENOMEM; 787 788 /* Save page addr, low 12 bits : 0 */ 789 for (i = 0; i < npages; ++i) { 790 if (!hr_dev->caps.mtt_hop_num) 791 mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT); 792 else 793 mtts[i] = cpu_to_le64(page_list[i]); 794 } 795 796 return 0; 797 } 798 799 static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev, 800 struct hns_roce_mtt *mtt, u32 start_index, 801 u32 npages, u64 *page_list) 802 { 803 int chunk; 804 int ret; 805 u32 bt_page_size; 806 807 if (mtt->order < 0) 808 return -EINVAL; 809 810 switch (mtt->mtt_type) { 811 case MTT_TYPE_WQE: 812 bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT); 813 break; 814 case MTT_TYPE_CQE: 815 bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT); 816 break; 817 case MTT_TYPE_SRQWQE: 818 bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT); 819 break; 820 case MTT_TYPE_IDX: 821 bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT); 822 break; 823 default: 824 dev_err(hr_dev->dev, 825 "Unsupport mtt type %d, write mtt failed\n", 826 mtt->mtt_type); 827 return -EINVAL; 828 } 829 830 while (npages > 0) { 831 chunk = min_t(int, bt_page_size / sizeof(u64), npages); 832 833 ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk, 834 page_list); 835 if (ret) 836 return ret; 837 838 npages -= chunk; 839 start_index += chunk; 840 page_list += chunk; 841 } 842 843 return 0; 844 } 845 846 int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev, 847 struct hns_roce_mtt *mtt, struct hns_roce_buf *buf) 848 { 849 u64 *page_list; 850 int ret; 851 u32 i; 852 853 page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL); 854 if (!page_list) 855 return -ENOMEM; 856 857 for (i = 0; i < buf->npages; ++i) { 858 if (buf->nbufs == 1) 859 page_list[i] = buf->direct.map + (i << buf->page_shift); 860 else 861 page_list[i] = buf->page_list[i].map; 862 863 } 864 ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list); 865 866 kfree(page_list); 867 868 return ret; 869 } 870 871 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev) 872 { 873 struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 874 int ret; 875 876 ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap, 877 hr_dev->caps.num_mtpts, 878 hr_dev->caps.num_mtpts - 1, 879 hr_dev->caps.reserved_mrws, 0); 880 if (ret) 881 return ret; 882 883 ret = hns_roce_buddy_init(&mr_table->mtt_buddy, 884 ilog2(hr_dev->caps.num_mtt_segs)); 885 if (ret) 886 goto err_buddy; 887 888 if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) { 889 ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy, 890 ilog2(hr_dev->caps.num_cqe_segs)); 891 if (ret) 892 goto err_buddy_cqe; 893 } 894 895 if (hr_dev->caps.num_srqwqe_segs) { 896 ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy, 897 ilog2(hr_dev->caps.num_srqwqe_segs)); 898 if (ret) 899 goto err_buddy_srqwqe; 900 } 901 902 if (hr_dev->caps.num_idx_segs) { 903 ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy, 904 ilog2(hr_dev->caps.num_idx_segs)); 905 if (ret) 906 goto err_buddy_idx; 907 } 908 909 return 0; 910 911 err_buddy_idx: 912 if (hr_dev->caps.num_srqwqe_segs) 913 hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy); 914 915 err_buddy_srqwqe: 916 if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) 917 hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy); 918 919 err_buddy_cqe: 920 hns_roce_buddy_cleanup(&mr_table->mtt_buddy); 921 922 err_buddy: 923 hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap); 924 return ret; 925 } 926 927 void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev) 928 { 929 struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 930 931 if (hr_dev->caps.num_idx_segs) 932 hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy); 933 if (hr_dev->caps.num_srqwqe_segs) 934 hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy); 935 hns_roce_buddy_cleanup(&mr_table->mtt_buddy); 936 if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) 937 hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy); 938 hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap); 939 } 940 941 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) 942 { 943 struct hns_roce_mr *mr; 944 int ret; 945 946 mr = kmalloc(sizeof(*mr), GFP_KERNEL); 947 if (mr == NULL) 948 return ERR_PTR(-ENOMEM); 949 950 mr->type = MR_TYPE_DMA; 951 952 /* Allocate memory region key */ 953 ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0, 954 ~0ULL, acc, 0, mr); 955 if (ret) 956 goto err_free; 957 958 ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr); 959 if (ret) 960 goto err_mr; 961 962 mr->ibmr.rkey = mr->ibmr.lkey = mr->key; 963 mr->umem = NULL; 964 965 return &mr->ibmr; 966 967 err_mr: 968 hns_roce_mr_free(to_hr_dev(pd->device), mr); 969 970 err_free: 971 kfree(mr); 972 return ERR_PTR(ret); 973 } 974 975 int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev, 976 struct hns_roce_mtt *mtt, struct ib_umem *umem) 977 { 978 struct device *dev = hr_dev->dev; 979 struct sg_dma_page_iter sg_iter; 980 unsigned int order; 981 int npage = 0; 982 int ret = 0; 983 int i; 984 u64 page_addr; 985 u64 *pages; 986 u32 bt_page_size; 987 u32 n; 988 989 switch (mtt->mtt_type) { 990 case MTT_TYPE_WQE: 991 order = hr_dev->caps.mtt_ba_pg_sz; 992 break; 993 case MTT_TYPE_CQE: 994 order = hr_dev->caps.cqe_ba_pg_sz; 995 break; 996 case MTT_TYPE_SRQWQE: 997 order = hr_dev->caps.srqwqe_ba_pg_sz; 998 break; 999 case MTT_TYPE_IDX: 1000 order = hr_dev->caps.idx_ba_pg_sz; 1001 break; 1002 default: 1003 dev_err(dev, "Unsupport mtt type %d, write mtt failed\n", 1004 mtt->mtt_type); 1005 return -EINVAL; 1006 } 1007 1008 bt_page_size = 1 << (order + PAGE_SHIFT); 1009 1010 pages = (u64 *) __get_free_pages(GFP_KERNEL, order); 1011 if (!pages) 1012 return -ENOMEM; 1013 1014 i = n = 0; 1015 1016 for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { 1017 page_addr = sg_page_iter_dma_address(&sg_iter); 1018 if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) { 1019 if (page_addr & ((1 << mtt->page_shift) - 1)) { 1020 dev_err(dev, 1021 "page_addr 0x%llx is not page_shift %d alignment!\n", 1022 page_addr, mtt->page_shift); 1023 ret = -EINVAL; 1024 goto out; 1025 } 1026 pages[i++] = page_addr; 1027 } 1028 npage++; 1029 if (i == bt_page_size / sizeof(u64)) { 1030 ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages); 1031 if (ret) 1032 goto out; 1033 n += i; 1034 i = 0; 1035 } 1036 } 1037 1038 if (i) 1039 ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages); 1040 1041 out: 1042 free_pages((unsigned long) pages, order); 1043 return ret; 1044 } 1045 1046 static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev, 1047 struct hns_roce_mr *mr, 1048 struct ib_umem *umem) 1049 { 1050 struct sg_dma_page_iter sg_iter; 1051 int i = 0, j = 0; 1052 u64 page_addr; 1053 u32 pbl_bt_sz; 1054 1055 if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0) 1056 return 0; 1057 1058 pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); 1059 for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { 1060 page_addr = sg_page_iter_dma_address(&sg_iter); 1061 if (!hr_dev->caps.pbl_hop_num) { 1062 mr->pbl_buf[i++] = page_addr >> 12; 1063 } else if (hr_dev->caps.pbl_hop_num == 1) { 1064 mr->pbl_buf[i++] = page_addr; 1065 } else { 1066 if (hr_dev->caps.pbl_hop_num == 2) 1067 mr->pbl_bt_l1[i][j] = page_addr; 1068 else if (hr_dev->caps.pbl_hop_num == 3) 1069 mr->pbl_bt_l2[i][j] = page_addr; 1070 1071 j++; 1072 if (j >= (pbl_bt_sz / 8)) { 1073 i++; 1074 j = 0; 1075 } 1076 } 1077 } 1078 1079 /* Memory barrier */ 1080 mb(); 1081 1082 return 0; 1083 } 1084 1085 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 1086 u64 virt_addr, int access_flags, 1087 struct ib_udata *udata) 1088 { 1089 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); 1090 struct device *dev = hr_dev->dev; 1091 struct hns_roce_mr *mr; 1092 int bt_size; 1093 int ret; 1094 int n; 1095 int i; 1096 1097 mr = kmalloc(sizeof(*mr), GFP_KERNEL); 1098 if (!mr) 1099 return ERR_PTR(-ENOMEM); 1100 1101 mr->umem = ib_umem_get(udata, start, length, access_flags, 0); 1102 if (IS_ERR(mr->umem)) { 1103 ret = PTR_ERR(mr->umem); 1104 goto err_free; 1105 } 1106 1107 n = ib_umem_page_count(mr->umem); 1108 1109 if (!hr_dev->caps.pbl_hop_num) { 1110 if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) { 1111 dev_err(dev, 1112 " MR len %lld err. MR is limited to 4G at most!\n", 1113 length); 1114 ret = -EINVAL; 1115 goto err_umem; 1116 } 1117 } else { 1118 u64 pbl_size = 1; 1119 1120 bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8; 1121 for (i = 0; i < hr_dev->caps.pbl_hop_num; i++) 1122 pbl_size *= bt_size; 1123 if (n > pbl_size) { 1124 dev_err(dev, 1125 " MR len %lld err. MR page num is limited to %lld!\n", 1126 length, pbl_size); 1127 ret = -EINVAL; 1128 goto err_umem; 1129 } 1130 } 1131 1132 mr->type = MR_TYPE_MR; 1133 1134 ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length, 1135 access_flags, n, mr); 1136 if (ret) 1137 goto err_umem; 1138 1139 ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); 1140 if (ret) 1141 goto err_mr; 1142 1143 ret = hns_roce_mr_enable(hr_dev, mr); 1144 if (ret) 1145 goto err_mr; 1146 1147 mr->ibmr.rkey = mr->ibmr.lkey = mr->key; 1148 1149 return &mr->ibmr; 1150 1151 err_mr: 1152 hns_roce_mr_free(hr_dev, mr); 1153 1154 err_umem: 1155 ib_umem_release(mr->umem); 1156 1157 err_free: 1158 kfree(mr); 1159 return ERR_PTR(ret); 1160 } 1161 1162 int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, 1163 u64 virt_addr, int mr_access_flags, struct ib_pd *pd, 1164 struct ib_udata *udata) 1165 { 1166 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); 1167 struct hns_roce_mr *mr = to_hr_mr(ibmr); 1168 struct hns_roce_cmd_mailbox *mailbox; 1169 struct device *dev = hr_dev->dev; 1170 unsigned long mtpt_idx; 1171 u32 pdn = 0; 1172 int npages; 1173 int ret; 1174 1175 if (!mr->enabled) 1176 return -EINVAL; 1177 1178 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 1179 if (IS_ERR(mailbox)) 1180 return PTR_ERR(mailbox); 1181 1182 mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1); 1183 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0, 1184 HNS_ROCE_CMD_QUERY_MPT, 1185 HNS_ROCE_CMD_TIMEOUT_MSECS); 1186 if (ret) 1187 goto free_cmd_mbox; 1188 1189 ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx); 1190 if (ret) 1191 dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret); 1192 1193 mr->enabled = 0; 1194 1195 if (flags & IB_MR_REREG_PD) 1196 pdn = to_hr_pd(pd)->pdn; 1197 1198 if (flags & IB_MR_REREG_TRANS) { 1199 if (mr->size != ~0ULL) { 1200 npages = ib_umem_page_count(mr->umem); 1201 1202 if (hr_dev->caps.pbl_hop_num) 1203 hns_roce_mhop_free(hr_dev, mr); 1204 else 1205 dma_free_coherent(dev, npages * 8, mr->pbl_buf, 1206 mr->pbl_dma_addr); 1207 } 1208 ib_umem_release(mr->umem); 1209 1210 mr->umem = 1211 ib_umem_get(udata, start, length, mr_access_flags, 0); 1212 if (IS_ERR(mr->umem)) { 1213 ret = PTR_ERR(mr->umem); 1214 mr->umem = NULL; 1215 goto free_cmd_mbox; 1216 } 1217 npages = ib_umem_page_count(mr->umem); 1218 1219 if (hr_dev->caps.pbl_hop_num) { 1220 ret = hns_roce_mhop_alloc(hr_dev, npages, mr); 1221 if (ret) 1222 goto release_umem; 1223 } else { 1224 mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, 1225 &(mr->pbl_dma_addr), 1226 GFP_KERNEL); 1227 if (!mr->pbl_buf) { 1228 ret = -ENOMEM; 1229 goto release_umem; 1230 } 1231 } 1232 } 1233 1234 ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn, 1235 mr_access_flags, virt_addr, 1236 length, mailbox->buf); 1237 if (ret) { 1238 if (flags & IB_MR_REREG_TRANS) 1239 goto release_umem; 1240 else 1241 goto free_cmd_mbox; 1242 } 1243 1244 if (flags & IB_MR_REREG_TRANS) { 1245 ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem); 1246 if (ret) { 1247 if (mr->size != ~0ULL) { 1248 npages = ib_umem_page_count(mr->umem); 1249 1250 if (hr_dev->caps.pbl_hop_num) 1251 hns_roce_mhop_free(hr_dev, mr); 1252 else 1253 dma_free_coherent(dev, npages * 8, 1254 mr->pbl_buf, 1255 mr->pbl_dma_addr); 1256 } 1257 1258 goto release_umem; 1259 } 1260 } 1261 1262 ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx); 1263 if (ret) { 1264 dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); 1265 goto release_umem; 1266 } 1267 1268 mr->enabled = 1; 1269 if (flags & IB_MR_REREG_ACCESS) 1270 mr->access = mr_access_flags; 1271 1272 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 1273 1274 return 0; 1275 1276 release_umem: 1277 ib_umem_release(mr->umem); 1278 1279 free_cmd_mbox: 1280 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 1281 1282 return ret; 1283 } 1284 1285 int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) 1286 { 1287 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); 1288 struct hns_roce_mr *mr = to_hr_mr(ibmr); 1289 int ret = 0; 1290 1291 if (hr_dev->hw->dereg_mr) { 1292 ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata); 1293 } else { 1294 hns_roce_mr_free(hr_dev, mr); 1295 1296 if (mr->umem) 1297 ib_umem_release(mr->umem); 1298 1299 kfree(mr); 1300 } 1301 1302 return ret; 1303 } 1304 1305 struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 1306 u32 max_num_sg, struct ib_udata *udata) 1307 { 1308 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); 1309 struct device *dev = hr_dev->dev; 1310 struct hns_roce_mr *mr; 1311 u64 length; 1312 u32 page_size; 1313 int ret; 1314 1315 page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT); 1316 length = max_num_sg * page_size; 1317 1318 if (mr_type != IB_MR_TYPE_MEM_REG) 1319 return ERR_PTR(-EINVAL); 1320 1321 if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) { 1322 dev_err(dev, "max_num_sg larger than %d\n", 1323 HNS_ROCE_FRMR_MAX_PA); 1324 return ERR_PTR(-EINVAL); 1325 } 1326 1327 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1328 if (!mr) 1329 return ERR_PTR(-ENOMEM); 1330 1331 mr->type = MR_TYPE_FRMR; 1332 1333 /* Allocate memory region key */ 1334 ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length, 1335 0, max_num_sg, mr); 1336 if (ret) 1337 goto err_free; 1338 1339 ret = hns_roce_mr_enable(hr_dev, mr); 1340 if (ret) 1341 goto err_mr; 1342 1343 mr->ibmr.rkey = mr->ibmr.lkey = mr->key; 1344 mr->umem = NULL; 1345 1346 return &mr->ibmr; 1347 1348 err_mr: 1349 hns_roce_mr_free(to_hr_dev(pd->device), mr); 1350 1351 err_free: 1352 kfree(mr); 1353 return ERR_PTR(ret); 1354 } 1355 1356 static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) 1357 { 1358 struct hns_roce_mr *mr = to_hr_mr(ibmr); 1359 1360 mr->pbl_buf[mr->npages++] = cpu_to_le64(addr); 1361 1362 return 0; 1363 } 1364 1365 int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, 1366 unsigned int *sg_offset) 1367 { 1368 struct hns_roce_mr *mr = to_hr_mr(ibmr); 1369 1370 mr->npages = 0; 1371 1372 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); 1373 } 1374 1375 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, 1376 struct hns_roce_mw *mw) 1377 { 1378 struct device *dev = hr_dev->dev; 1379 int ret; 1380 1381 if (mw->enabled) { 1382 ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey) 1383 & (hr_dev->caps.num_mtpts - 1)); 1384 if (ret) 1385 dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret); 1386 1387 hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, 1388 key_to_hw_index(mw->rkey)); 1389 } 1390 1391 hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, 1392 key_to_hw_index(mw->rkey), BITMAP_NO_RR); 1393 } 1394 1395 static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev, 1396 struct hns_roce_mw *mw) 1397 { 1398 struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 1399 struct hns_roce_cmd_mailbox *mailbox; 1400 struct device *dev = hr_dev->dev; 1401 unsigned long mtpt_idx = key_to_hw_index(mw->rkey); 1402 int ret; 1403 1404 /* prepare HEM entry memory */ 1405 ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx); 1406 if (ret) 1407 return ret; 1408 1409 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); 1410 if (IS_ERR(mailbox)) { 1411 ret = PTR_ERR(mailbox); 1412 goto err_table; 1413 } 1414 1415 ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw); 1416 if (ret) { 1417 dev_err(dev, "MW write mtpt fail!\n"); 1418 goto err_page; 1419 } 1420 1421 ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, 1422 mtpt_idx & (hr_dev->caps.num_mtpts - 1)); 1423 if (ret) { 1424 dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret); 1425 goto err_page; 1426 } 1427 1428 mw->enabled = 1; 1429 1430 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 1431 1432 return 0; 1433 1434 err_page: 1435 hns_roce_free_cmd_mailbox(hr_dev, mailbox); 1436 1437 err_table: 1438 hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx); 1439 1440 return ret; 1441 } 1442 1443 struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, 1444 struct ib_udata *udata) 1445 { 1446 struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device); 1447 struct hns_roce_mw *mw; 1448 unsigned long index = 0; 1449 int ret; 1450 1451 mw = kmalloc(sizeof(*mw), GFP_KERNEL); 1452 if (!mw) 1453 return ERR_PTR(-ENOMEM); 1454 1455 /* Allocate a key for mw from bitmap */ 1456 ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index); 1457 if (ret) 1458 goto err_bitmap; 1459 1460 mw->rkey = hw_index_to_key(index); 1461 1462 mw->ibmw.rkey = mw->rkey; 1463 mw->ibmw.type = type; 1464 mw->pdn = to_hr_pd(ib_pd)->pdn; 1465 mw->pbl_hop_num = hr_dev->caps.pbl_hop_num; 1466 mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; 1467 mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; 1468 1469 ret = hns_roce_mw_enable(hr_dev, mw); 1470 if (ret) 1471 goto err_mw; 1472 1473 return &mw->ibmw; 1474 1475 err_mw: 1476 hns_roce_mw_free(hr_dev, mw); 1477 1478 err_bitmap: 1479 kfree(mw); 1480 1481 return ERR_PTR(ret); 1482 } 1483 1484 int hns_roce_dealloc_mw(struct ib_mw *ibmw) 1485 { 1486 struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device); 1487 struct hns_roce_mw *mw = to_hr_mw(ibmw); 1488 1489 hns_roce_mw_free(hr_dev, mw); 1490 kfree(mw); 1491 1492 return 0; 1493 } 1494