/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "dm.h"
#include "mlx5_ib.h"

/*
 * We can't use an array for xlt_emergency_page because dma_map_single doesn't
 * work on kernel modules memory
 */
void *xlt_emergency_page;
static DEFINE_MUTEX(xlt_emergency_page_mutex);

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate);

static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	bool ro_pci_enabled = pcie_relaxed_ordering_enabled(dev->mdev->pdev);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
		MLX5_SET(mkc, mkc, relaxed_ordering_write,
			 (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
		MLX5_SET(mkc, mkc, relaxed_ordering_read,
			 (acc & IB_ACCESS_RELAXED_ORDERING) && ro_pci_enabled);

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}

static void assign_mkey_variant(struct mlx5_ib_dev *dev,
				struct mlx5_ib_mkey *mkey, u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in,
in, memory_key_mkey_entry); 98 MLX5_SET(mkc, mkc, mkey_7_0, key); 99 mkey->key = key; 100 } 101 102 static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, 103 struct mlx5_ib_mkey *mkey, u32 *in, int inlen) 104 { 105 int ret; 106 107 assign_mkey_variant(dev, mkey, in); 108 ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); 109 if (!ret) 110 init_waitqueue_head(&mkey->wait); 111 112 return ret; 113 } 114 115 static int 116 mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev, 117 struct mlx5_ib_mkey *mkey, 118 struct mlx5_async_ctx *async_ctx, 119 u32 *in, int inlen, u32 *out, int outlen, 120 struct mlx5_async_work *context) 121 { 122 MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY); 123 assign_mkey_variant(dev, mkey, in); 124 return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen, 125 create_mkey_callback, context); 126 } 127 128 static int mr_cache_max_order(struct mlx5_ib_dev *dev); 129 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent); 130 131 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) 132 { 133 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); 134 } 135 136 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 137 { 138 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); 139 140 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); 141 } 142 143 static void create_mkey_callback(int status, struct mlx5_async_work *context) 144 { 145 struct mlx5_ib_mr *mr = 146 container_of(context, struct mlx5_ib_mr, cb_work); 147 struct mlx5_cache_ent *ent = mr->cache_ent; 148 struct mlx5_ib_dev *dev = ent->dev; 149 unsigned long flags; 150 151 if (status) { 152 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); 153 kfree(mr); 154 spin_lock_irqsave(&ent->lock, flags); 155 ent->pending--; 156 WRITE_ONCE(dev->fill_delay, 1); 157 spin_unlock_irqrestore(&ent->lock, flags); 158 mod_timer(&dev->delay_timer, jiffies + HZ); 159 return; 160 } 161 162 mr->mmkey.type = MLX5_MKEY_MR; 163 mr->mmkey.key |= mlx5_idx_to_mkey( 164 MLX5_GET(create_mkey_out, mr->out, mkey_index)); 165 init_waitqueue_head(&mr->mmkey.wait); 166 167 WRITE_ONCE(dev->cache.last_add, jiffies); 168 169 spin_lock_irqsave(&ent->lock, flags); 170 list_add_tail(&mr->list, &ent->head); 171 ent->available_mrs++; 172 ent->total_mrs++; 173 /* If we are doing fill_to_high_water then keep going. */ 174 queue_adjust_cache_locked(ent); 175 ent->pending--; 176 spin_unlock_irqrestore(&ent->lock, flags); 177 } 178 179 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc) 180 { 181 struct mlx5_ib_mr *mr; 182 183 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 184 if (!mr) 185 return NULL; 186 mr->cache_ent = ent; 187 188 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); 189 MLX5_SET(mkc, mkc, free, 1); 190 MLX5_SET(mkc, mkc, umr_en, 1); 191 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); 192 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); 193 194 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); 195 MLX5_SET(mkc, mkc, log_page_size, ent->page); 196 return mr; 197 } 198 199 /* Asynchronously schedule new MRs to be populated in the cache. 
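 *
 * Each MR is created by a CREATE_MKEY command posted through the device's
 * async command interface; create_mkey_callback() links the finished MR into
 * ent->head. At most MAX_PENDING_REG_MR commands may be outstanding per cache
 * entry, and callers see -EAGAIN once that limit is reached.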
*/ 200 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) 201 { 202 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 203 struct mlx5_ib_mr *mr; 204 void *mkc; 205 u32 *in; 206 int err = 0; 207 int i; 208 209 in = kzalloc(inlen, GFP_KERNEL); 210 if (!in) 211 return -ENOMEM; 212 213 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 214 for (i = 0; i < num; i++) { 215 mr = alloc_cache_mr(ent, mkc); 216 if (!mr) { 217 err = -ENOMEM; 218 break; 219 } 220 spin_lock_irq(&ent->lock); 221 if (ent->pending >= MAX_PENDING_REG_MR) { 222 err = -EAGAIN; 223 spin_unlock_irq(&ent->lock); 224 kfree(mr); 225 break; 226 } 227 ent->pending++; 228 spin_unlock_irq(&ent->lock); 229 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, 230 &ent->dev->async_ctx, in, inlen, 231 mr->out, sizeof(mr->out), 232 &mr->cb_work); 233 if (err) { 234 spin_lock_irq(&ent->lock); 235 ent->pending--; 236 spin_unlock_irq(&ent->lock); 237 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); 238 kfree(mr); 239 break; 240 } 241 } 242 243 kfree(in); 244 return err; 245 } 246 247 /* Synchronously create a MR in the cache */ 248 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) 249 { 250 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 251 struct mlx5_ib_mr *mr; 252 void *mkc; 253 u32 *in; 254 int err; 255 256 in = kzalloc(inlen, GFP_KERNEL); 257 if (!in) 258 return ERR_PTR(-ENOMEM); 259 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 260 261 mr = alloc_cache_mr(ent, mkc); 262 if (!mr) { 263 err = -ENOMEM; 264 goto free_in; 265 } 266 267 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen); 268 if (err) 269 goto free_mr; 270 271 init_waitqueue_head(&mr->mmkey.wait); 272 mr->mmkey.type = MLX5_MKEY_MR; 273 WRITE_ONCE(ent->dev->cache.last_add, jiffies); 274 spin_lock_irq(&ent->lock); 275 ent->total_mrs++; 276 spin_unlock_irq(&ent->lock); 277 kfree(in); 278 return mr; 279 free_mr: 280 kfree(mr); 281 free_in: 282 kfree(in); 283 return ERR_PTR(err); 284 } 285 286 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) 287 { 288 struct mlx5_ib_mr *mr; 289 290 lockdep_assert_held(&ent->lock); 291 if (list_empty(&ent->head)) 292 return; 293 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); 294 list_del(&mr->list); 295 ent->available_mrs--; 296 ent->total_mrs--; 297 spin_unlock_irq(&ent->lock); 298 mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key); 299 kfree(mr); 300 spin_lock_irq(&ent->lock); 301 } 302 303 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, 304 bool limit_fill) 305 { 306 int err; 307 308 lockdep_assert_held(&ent->lock); 309 310 while (true) { 311 if (limit_fill) 312 target = ent->limit * 2; 313 if (target == ent->available_mrs + ent->pending) 314 return 0; 315 if (target > ent->available_mrs + ent->pending) { 316 u32 todo = target - (ent->available_mrs + ent->pending); 317 318 spin_unlock_irq(&ent->lock); 319 err = add_keys(ent, todo); 320 if (err == -EAGAIN) 321 usleep_range(3000, 5000); 322 spin_lock_irq(&ent->lock); 323 if (err) { 324 if (err != -EAGAIN) 325 return err; 326 } else 327 return 0; 328 } else { 329 remove_cache_mr_locked(ent); 330 } 331 } 332 } 333 334 static ssize_t size_write(struct file *filp, const char __user *buf, 335 size_t count, loff_t *pos) 336 { 337 struct mlx5_cache_ent *ent = filp->private_data; 338 u32 target; 339 int err; 340 341 err = kstrtou32_from_user(buf, count, 0, &target); 342 if (err) 343 return err; 344 345 /* 346 * Target is the new value of total_mrs the 
user requests, however we 347 * cannot free MRs that are in use. Compute the target value for 348 * available_mrs. 349 */ 350 spin_lock_irq(&ent->lock); 351 if (target < ent->total_mrs - ent->available_mrs) { 352 err = -EINVAL; 353 goto err_unlock; 354 } 355 target = target - (ent->total_mrs - ent->available_mrs); 356 if (target < ent->limit || target > ent->limit*2) { 357 err = -EINVAL; 358 goto err_unlock; 359 } 360 err = resize_available_mrs(ent, target, false); 361 if (err) 362 goto err_unlock; 363 spin_unlock_irq(&ent->lock); 364 365 return count; 366 367 err_unlock: 368 spin_unlock_irq(&ent->lock); 369 return err; 370 } 371 372 static ssize_t size_read(struct file *filp, char __user *buf, size_t count, 373 loff_t *pos) 374 { 375 struct mlx5_cache_ent *ent = filp->private_data; 376 char lbuf[20]; 377 int err; 378 379 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); 380 if (err < 0) 381 return err; 382 383 return simple_read_from_buffer(buf, count, pos, lbuf, err); 384 } 385 386 static const struct file_operations size_fops = { 387 .owner = THIS_MODULE, 388 .open = simple_open, 389 .write = size_write, 390 .read = size_read, 391 }; 392 393 static ssize_t limit_write(struct file *filp, const char __user *buf, 394 size_t count, loff_t *pos) 395 { 396 struct mlx5_cache_ent *ent = filp->private_data; 397 u32 var; 398 int err; 399 400 err = kstrtou32_from_user(buf, count, 0, &var); 401 if (err) 402 return err; 403 404 /* 405 * Upon set we immediately fill the cache to high water mark implied by 406 * the limit. 407 */ 408 spin_lock_irq(&ent->lock); 409 ent->limit = var; 410 err = resize_available_mrs(ent, 0, true); 411 spin_unlock_irq(&ent->lock); 412 if (err) 413 return err; 414 return count; 415 } 416 417 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, 418 loff_t *pos) 419 { 420 struct mlx5_cache_ent *ent = filp->private_data; 421 char lbuf[20]; 422 int err; 423 424 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); 425 if (err < 0) 426 return err; 427 428 return simple_read_from_buffer(buf, count, pos, lbuf, err); 429 } 430 431 static const struct file_operations limit_fops = { 432 .owner = THIS_MODULE, 433 .open = simple_open, 434 .write = limit_write, 435 .read = limit_read, 436 }; 437 438 static bool someone_adding(struct mlx5_mr_cache *cache) 439 { 440 unsigned int i; 441 442 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 443 struct mlx5_cache_ent *ent = &cache->ent[i]; 444 bool ret; 445 446 spin_lock_irq(&ent->lock); 447 ret = ent->available_mrs < ent->limit; 448 spin_unlock_irq(&ent->lock); 449 if (ret) 450 return true; 451 } 452 return false; 453 } 454 455 /* 456 * Check if the bucket is outside the high/low water mark and schedule an async 457 * update. The cache refill has hysteresis, once the low water mark is hit it is 458 * refilled up to the high mark. 459 */ 460 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) 461 { 462 lockdep_assert_held(&ent->lock); 463 464 if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) 465 return; 466 if (ent->available_mrs < ent->limit) { 467 ent->fill_to_high_water = true; 468 queue_work(ent->dev->cache.wq, &ent->work); 469 } else if (ent->fill_to_high_water && 470 ent->available_mrs + ent->pending < 2 * ent->limit) { 471 /* 472 * Once we start populating due to hitting a low water mark 473 * continue until we pass the high water mark. 
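		 *
		 * For example (hypothetical numbers): with ent->limit == 500,
		 * refilling starts once fewer than 500 MRs are available and
		 * keeps requeueing this work until available_mrs + pending
		 * reaches 2 * limit == 1000.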
	 */
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->available_mrs == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->pending)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			queue_work(ent->dev->cache.wq, &ent->work);
	}
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int err;

	spin_lock_irq(&ent->lock);
	if (ent->disabled)
		goto out;

	if (ent->fill_to_high_water &&
	    ent->available_mrs + ent->pending < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		spin_unlock_irq(&ent->lock);
		err = add_keys(ent, 1);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (err) {
			/*
			 * EAGAIN only happens if pending is positive, so we
			 * will be rescheduled from reg_mr_callback(). The only
			 * failure path here is ENOMEM.
			 */
			if (err != -EAGAIN) {
				mlx5_ib_warn(
					dev,
					"command failed order %d, err %d\n",
					ent->order, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			}
		}
	} else if (ent->available_mrs > 2 * ent->limit) {
		bool need_delay;

		/*
		 * The remove_cache_mr() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_cache_mr() and
		 * postpone the garbage collection work to the next cycle, in
		 * order to free CPU resources for other tasks.
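		 *
		 * need_delay below is also true while another cache entry is
		 * still below its limit (someone_adding()) or while an MR was
		 * added to the cache within the last 300 seconds.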
536 */ 537 spin_unlock_irq(&ent->lock); 538 need_delay = need_resched() || someone_adding(cache) || 539 !time_after(jiffies, 540 READ_ONCE(cache->last_add) + 300 * HZ); 541 spin_lock_irq(&ent->lock); 542 if (ent->disabled) 543 goto out; 544 if (need_delay) 545 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); 546 remove_cache_mr_locked(ent); 547 queue_adjust_cache_locked(ent); 548 } 549 out: 550 spin_unlock_irq(&ent->lock); 551 } 552 553 static void delayed_cache_work_func(struct work_struct *work) 554 { 555 struct mlx5_cache_ent *ent; 556 557 ent = container_of(work, struct mlx5_cache_ent, dwork.work); 558 __cache_work_func(ent); 559 } 560 561 static void cache_work_func(struct work_struct *work) 562 { 563 struct mlx5_cache_ent *ent; 564 565 ent = container_of(work, struct mlx5_cache_ent, work); 566 __cache_work_func(ent); 567 } 568 569 /* Allocate a special entry from the cache */ 570 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, 571 unsigned int entry, int access_flags) 572 { 573 struct mlx5_mr_cache *cache = &dev->cache; 574 struct mlx5_cache_ent *ent; 575 struct mlx5_ib_mr *mr; 576 577 if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY || 578 entry >= ARRAY_SIZE(cache->ent))) 579 return ERR_PTR(-EINVAL); 580 581 /* Matches access in alloc_cache_mr() */ 582 if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags)) 583 return ERR_PTR(-EOPNOTSUPP); 584 585 ent = &cache->ent[entry]; 586 spin_lock_irq(&ent->lock); 587 if (list_empty(&ent->head)) { 588 spin_unlock_irq(&ent->lock); 589 mr = create_cache_mr(ent); 590 if (IS_ERR(mr)) 591 return mr; 592 } else { 593 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); 594 list_del(&mr->list); 595 ent->available_mrs--; 596 queue_adjust_cache_locked(ent); 597 spin_unlock_irq(&ent->lock); 598 599 mlx5_clear_mr(mr); 600 } 601 mr->access_flags = access_flags; 602 return mr; 603 } 604 605 /* Return a MR already available in the cache */ 606 static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent) 607 { 608 struct mlx5_ib_dev *dev = req_ent->dev; 609 struct mlx5_ib_mr *mr = NULL; 610 struct mlx5_cache_ent *ent = req_ent; 611 612 /* Try larger MR pools from the cache to satisfy the allocation */ 613 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { 614 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, 615 ent - dev->cache.ent); 616 617 spin_lock_irq(&ent->lock); 618 if (!list_empty(&ent->head)) { 619 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, 620 list); 621 list_del(&mr->list); 622 ent->available_mrs--; 623 queue_adjust_cache_locked(ent); 624 spin_unlock_irq(&ent->lock); 625 mlx5_clear_mr(mr); 626 return mr; 627 } 628 queue_adjust_cache_locked(ent); 629 spin_unlock_irq(&ent->lock); 630 } 631 req_ent->miss++; 632 return NULL; 633 } 634 635 static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 636 { 637 struct mlx5_cache_ent *ent = mr->cache_ent; 638 639 spin_lock_irq(&ent->lock); 640 list_add_tail(&mr->list, &ent->head); 641 ent->available_mrs++; 642 queue_adjust_cache_locked(ent); 643 spin_unlock_irq(&ent->lock); 644 } 645 646 static void clean_keys(struct mlx5_ib_dev *dev, int c) 647 { 648 struct mlx5_mr_cache *cache = &dev->cache; 649 struct mlx5_cache_ent *ent = &cache->ent[c]; 650 struct mlx5_ib_mr *tmp_mr; 651 struct mlx5_ib_mr *mr; 652 LIST_HEAD(del_list); 653 654 cancel_delayed_work(&ent->dwork); 655 while (1) { 656 spin_lock_irq(&ent->lock); 657 if (list_empty(&ent->head)) { 658 spin_unlock_irq(&ent->lock); 659 break; 660 } 661 mr = 
list_first_entry(&ent->head, struct mlx5_ib_mr, list); 662 list_move(&mr->list, &del_list); 663 ent->available_mrs--; 664 ent->total_mrs--; 665 spin_unlock_irq(&ent->lock); 666 mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); 667 } 668 669 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { 670 list_del(&mr->list); 671 kfree(mr); 672 } 673 } 674 675 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) 676 { 677 if (!mlx5_debugfs_root || dev->is_rep) 678 return; 679 680 debugfs_remove_recursive(dev->cache.root); 681 dev->cache.root = NULL; 682 } 683 684 static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) 685 { 686 struct mlx5_mr_cache *cache = &dev->cache; 687 struct mlx5_cache_ent *ent; 688 struct dentry *dir; 689 int i; 690 691 if (!mlx5_debugfs_root || dev->is_rep) 692 return; 693 694 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); 695 696 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 697 ent = &cache->ent[i]; 698 sprintf(ent->name, "%d", ent->order); 699 dir = debugfs_create_dir(ent->name, cache->root); 700 debugfs_create_file("size", 0600, dir, ent, &size_fops); 701 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); 702 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); 703 debugfs_create_u32("miss", 0600, dir, &ent->miss); 704 } 705 } 706 707 static void delay_time_func(struct timer_list *t) 708 { 709 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer); 710 711 WRITE_ONCE(dev->fill_delay, 0); 712 } 713 714 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) 715 { 716 struct mlx5_mr_cache *cache = &dev->cache; 717 struct mlx5_cache_ent *ent; 718 int i; 719 720 mutex_init(&dev->slow_path_mutex); 721 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 722 if (!cache->wq) { 723 mlx5_ib_warn(dev, "failed to create work queue\n"); 724 return -ENOMEM; 725 } 726 727 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); 728 timer_setup(&dev->delay_timer, delay_time_func, 0); 729 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 730 ent = &cache->ent[i]; 731 INIT_LIST_HEAD(&ent->head); 732 spin_lock_init(&ent->lock); 733 ent->order = i + 2; 734 ent->dev = dev; 735 ent->limit = 0; 736 737 INIT_WORK(&ent->work, cache_work_func); 738 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); 739 740 if (i > MR_CACHE_LAST_STD_ENTRY) { 741 mlx5_odp_init_mr_cache_entry(ent); 742 continue; 743 } 744 745 if (ent->order > mr_cache_max_order(dev)) 746 continue; 747 748 ent->page = PAGE_SHIFT; 749 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / 750 MLX5_IB_UMR_OCTOWORD; 751 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; 752 if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && 753 !dev->is_rep && mlx5_core_is_pf(dev->mdev) && 754 mlx5_ib_can_load_pas_with_umr(dev, 0)) 755 ent->limit = dev->mdev->profile.mr_cache[i].limit; 756 else 757 ent->limit = 0; 758 spin_lock_irq(&ent->lock); 759 queue_adjust_cache_locked(ent); 760 spin_unlock_irq(&ent->lock); 761 } 762 763 mlx5_mr_cache_debugfs_init(dev); 764 765 return 0; 766 } 767 768 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) 769 { 770 unsigned int i; 771 772 if (!dev->cache.wq) 773 return 0; 774 775 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 776 struct mlx5_cache_ent *ent = &dev->cache.ent[i]; 777 778 spin_lock_irq(&ent->lock); 779 ent->disabled = true; 780 spin_unlock_irq(&ent->lock); 781 cancel_work_sync(&ent->work); 782 cancel_delayed_work_sync(&ent->dwork); 783 } 784 785 mlx5_mr_cache_debugfs_cleanup(dev); 786 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); 787 
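	/*
	 * All entries are disabled and their work is cancelled at this point,
	 * so clean_keys() below can destroy the remaining cached mkeys without
	 * racing against new asynchronous creations.
	 */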
788 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) 789 clean_keys(dev, i); 790 791 destroy_workqueue(dev->cache.wq); 792 del_timer_sync(&dev->delay_timer); 793 794 return 0; 795 } 796 797 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) 798 { 799 struct mlx5_ib_dev *dev = to_mdev(pd->device); 800 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 801 struct mlx5_ib_mr *mr; 802 void *mkc; 803 u32 *in; 804 int err; 805 806 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 807 if (!mr) 808 return ERR_PTR(-ENOMEM); 809 810 in = kzalloc(inlen, GFP_KERNEL); 811 if (!in) { 812 err = -ENOMEM; 813 goto err_free; 814 } 815 816 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 817 818 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); 819 MLX5_SET(mkc, mkc, length64, 1); 820 set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0, 821 pd); 822 823 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 824 if (err) 825 goto err_in; 826 827 kfree(in); 828 mr->mmkey.type = MLX5_MKEY_MR; 829 mr->ibmr.lkey = mr->mmkey.key; 830 mr->ibmr.rkey = mr->mmkey.key; 831 mr->umem = NULL; 832 833 return &mr->ibmr; 834 835 err_in: 836 kfree(in); 837 838 err_free: 839 kfree(mr); 840 841 return ERR_PTR(err); 842 } 843 844 static int get_octo_len(u64 addr, u64 len, int page_shift) 845 { 846 u64 page_size = 1ULL << page_shift; 847 u64 offset; 848 int npages; 849 850 offset = addr & (page_size - 1); 851 npages = ALIGN(len + offset, page_size) >> page_shift; 852 return (npages + 1) / 2; 853 } 854 855 static int mr_cache_max_order(struct mlx5_ib_dev *dev) 856 { 857 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 858 return MR_CACHE_LAST_STD_ENTRY + 2; 859 return MLX5_MAX_UMR_SHIFT; 860 } 861 862 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) 863 { 864 struct mlx5_ib_umr_context *context = 865 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); 866 867 context->status = wc->status; 868 complete(&context->done); 869 } 870 871 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) 872 { 873 context->cqe.done = mlx5_ib_umr_done; 874 context->status = -1; 875 init_completion(&context->done); 876 } 877 878 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, 879 struct mlx5_umr_wr *umrwr) 880 { 881 struct umr_common *umrc = &dev->umrc; 882 const struct ib_send_wr *bad; 883 int err; 884 struct mlx5_ib_umr_context umr_context; 885 886 mlx5_ib_init_umr_context(&umr_context); 887 umrwr->wr.wr_cqe = &umr_context.cqe; 888 889 down(&umrc->sem); 890 err = ib_post_send(umrc->qp, &umrwr->wr, &bad); 891 if (err) { 892 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); 893 } else { 894 wait_for_completion(&umr_context.done); 895 if (umr_context.status != IB_WC_SUCCESS) { 896 mlx5_ib_warn(dev, "reg umr failed (%u)\n", 897 umr_context.status); 898 err = -EFAULT; 899 } 900 } 901 up(&umrc->sem); 902 return err; 903 } 904 905 static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev, 906 unsigned int order) 907 { 908 struct mlx5_mr_cache *cache = &dev->cache; 909 910 if (order < cache->ent[0].order) 911 return &cache->ent[0]; 912 order = order - cache->ent[0].order; 913 if (order > MR_CACHE_LAST_STD_ENTRY) 914 return NULL; 915 return &cache->ent[order]; 916 } 917 918 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, 919 u64 length, int access_flags, u64 iova) 920 { 921 mr->ibmr.lkey = mr->mmkey.key; 922 mr->ibmr.rkey = mr->mmkey.key; 923 mr->ibmr.length = length; 924 mr->ibmr.device = 
&dev->ib_dev;
	mr->ibmr.iova = iova;
	mr->access_flags = access_flags;
}

static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
						  u64 iova)
{
	/*
	 * The alignment of iova has already been checked upon entering
	 * UVERBS_METHOD_REG_DMABUF_MR
	 */
	umem->iova = iova;
	return PAGE_SIZE;
}

static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
					     struct ib_umem *umem, u64 iova,
					     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	unsigned int page_size;

	if (umem->is_dmabuf)
		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
	else
		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
						     0, iova);
	if (WARN_ON(!page_size))
		return ERR_PTR(-EINVAL);
	ent = mr_cache_ent_from_order(
		dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
	/*
	 * Matches access in alloc_cache_mr(). If the MR can't come from the
	 * cache then synchronously create an uncached one.
	 */
	if (!ent || ent->limit == 0 ||
	    !mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags)) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, false);
		mutex_unlock(&dev->slow_path_mutex);
		return mr;
	}

	mr = get_cache_mr(ent);
	if (!mr) {
		mr = create_cache_mr(ent);
		/*
		 * The above already tried to do the same thing as reg_create(),
		 * no reason to try it again.
		 */
		if (IS_ERR(mr))
			return mr;
	}

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->page_shift = order_base_2(page_size);
	set_mr_fields(dev, mr, umem->length, access_flags, iova);

	return mr;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

/*
 * Allocate a temporary buffer to hold the per-page information to transfer to
 * HW. For efficiency this should be as large as it can be, but buffer
 * allocation failure is not allowed, so try smaller sizes.
 */
static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
	const size_t xlt_chunk_align =
		MLX5_UMR_MTT_ALIGNMENT / ent_size;
	size_t size;
	void *res = NULL;

	static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);

	/*
	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that
	 * the allocation can't trigger any kind of reclaim.
	 */
	might_sleep();

	gfp_mask |= __GFP_ZERO | __GFP_NORETRY;

	/*
	 * If the system already has a suitable high order page then just use
	 * that, but don't try hard to create one. This max is about 1M, so a
	 * free x86 huge page will satisfy it.
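	 *
	 * The fallback order is: one chunk of up to MLX5_MAX_UMR_CHUNK, then
	 * MLX5_SPARE_UMR_CHUNK, then a single page, and finally the statically
	 * allocated xlt_emergency_page serialized by xlt_emergency_page_mutex,
	 * so this function cannot fail.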
1019 */ 1020 size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align), 1021 MLX5_MAX_UMR_CHUNK); 1022 *nents = size / ent_size; 1023 res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN, 1024 get_order(size)); 1025 if (res) 1026 return res; 1027 1028 if (size > MLX5_SPARE_UMR_CHUNK) { 1029 size = MLX5_SPARE_UMR_CHUNK; 1030 *nents = size / ent_size; 1031 res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN, 1032 get_order(size)); 1033 if (res) 1034 return res; 1035 } 1036 1037 *nents = PAGE_SIZE / ent_size; 1038 res = (void *)__get_free_page(gfp_mask); 1039 if (res) 1040 return res; 1041 1042 mutex_lock(&xlt_emergency_page_mutex); 1043 memset(xlt_emergency_page, 0, PAGE_SIZE); 1044 return xlt_emergency_page; 1045 } 1046 1047 static void mlx5_ib_free_xlt(void *xlt, size_t length) 1048 { 1049 if (xlt == xlt_emergency_page) { 1050 mutex_unlock(&xlt_emergency_page_mutex); 1051 return; 1052 } 1053 1054 free_pages((unsigned long)xlt, get_order(length)); 1055 } 1056 1057 /* 1058 * Create a MLX5_IB_SEND_UMR_UPDATE_XLT work request and XLT buffer ready for 1059 * submission. 1060 */ 1061 static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr, 1062 struct mlx5_umr_wr *wr, struct ib_sge *sg, 1063 size_t nents, size_t ent_size, 1064 unsigned int flags) 1065 { 1066 struct mlx5_ib_dev *dev = mr_to_mdev(mr); 1067 struct device *ddev = &dev->mdev->pdev->dev; 1068 dma_addr_t dma; 1069 void *xlt; 1070 1071 xlt = mlx5_ib_alloc_xlt(&nents, ent_size, 1072 flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : 1073 GFP_KERNEL); 1074 sg->length = nents * ent_size; 1075 dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE); 1076 if (dma_mapping_error(ddev, dma)) { 1077 mlx5_ib_err(dev, "unable to map DMA during XLT update.\n"); 1078 mlx5_ib_free_xlt(xlt, sg->length); 1079 return NULL; 1080 } 1081 sg->addr = dma; 1082 sg->lkey = dev->umrc.pd->local_dma_lkey; 1083 1084 memset(wr, 0, sizeof(*wr)); 1085 wr->wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT; 1086 if (!(flags & MLX5_IB_UPD_XLT_ENABLE)) 1087 wr->wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE; 1088 wr->wr.sg_list = sg; 1089 wr->wr.num_sge = 1; 1090 wr->wr.opcode = MLX5_IB_WR_UMR; 1091 wr->pd = mr->ibmr.pd; 1092 wr->mkey = mr->mmkey.key; 1093 wr->length = mr->ibmr.length; 1094 wr->virt_addr = mr->ibmr.iova; 1095 wr->access_flags = mr->access_flags; 1096 wr->page_shift = mr->page_shift; 1097 wr->xlt_size = sg->length; 1098 return xlt; 1099 } 1100 1101 static void mlx5_ib_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt, 1102 struct ib_sge *sg) 1103 { 1104 struct device *ddev = &dev->mdev->pdev->dev; 1105 1106 dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE); 1107 mlx5_ib_free_xlt(xlt, sg->length); 1108 } 1109 1110 static unsigned int xlt_wr_final_send_flags(unsigned int flags) 1111 { 1112 unsigned int res = 0; 1113 1114 if (flags & MLX5_IB_UPD_XLT_ENABLE) 1115 res |= MLX5_IB_SEND_UMR_ENABLE_MR | 1116 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS | 1117 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; 1118 if (flags & MLX5_IB_UPD_XLT_PD || flags & MLX5_IB_UPD_XLT_ACCESS) 1119 res |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; 1120 if (flags & MLX5_IB_UPD_XLT_ADDR) 1121 res |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; 1122 return res; 1123 } 1124 1125 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, 1126 int page_shift, int flags) 1127 { 1128 struct mlx5_ib_dev *dev = mr_to_mdev(mr); 1129 struct device *ddev = &dev->mdev->pdev->dev; 1130 void *xlt; 1131 struct mlx5_umr_wr wr; 1132 struct ib_sge sg; 1133 int err = 0; 1134 int desc_size = (flags & 
MLX5_IB_UPD_XLT_INDIRECT) 1135 ? sizeof(struct mlx5_klm) 1136 : sizeof(struct mlx5_mtt); 1137 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size; 1138 const int page_mask = page_align - 1; 1139 size_t pages_mapped = 0; 1140 size_t pages_to_map = 0; 1141 size_t pages_iter; 1142 size_t size_to_map = 0; 1143 size_t orig_sg_length; 1144 1145 if ((flags & MLX5_IB_UPD_XLT_INDIRECT) && 1146 !umr_can_use_indirect_mkey(dev)) 1147 return -EPERM; 1148 1149 if (WARN_ON(!mr->umem->is_odp)) 1150 return -EINVAL; 1151 1152 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, 1153 * so we need to align the offset and length accordingly 1154 */ 1155 if (idx & page_mask) { 1156 npages += idx & page_mask; 1157 idx &= ~page_mask; 1158 } 1159 pages_to_map = ALIGN(npages, page_align); 1160 1161 xlt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, npages, desc_size, flags); 1162 if (!xlt) 1163 return -ENOMEM; 1164 pages_iter = sg.length / desc_size; 1165 orig_sg_length = sg.length; 1166 1167 if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) { 1168 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); 1169 size_t max_pages = ib_umem_odp_num_pages(odp) - idx; 1170 1171 pages_to_map = min_t(size_t, pages_to_map, max_pages); 1172 } 1173 1174 wr.page_shift = page_shift; 1175 1176 for (pages_mapped = 0; 1177 pages_mapped < pages_to_map && !err; 1178 pages_mapped += pages_iter, idx += pages_iter) { 1179 npages = min_t(int, pages_iter, pages_to_map - pages_mapped); 1180 size_to_map = npages * desc_size; 1181 dma_sync_single_for_cpu(ddev, sg.addr, sg.length, 1182 DMA_TO_DEVICE); 1183 mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags); 1184 dma_sync_single_for_device(ddev, sg.addr, sg.length, 1185 DMA_TO_DEVICE); 1186 1187 sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT); 1188 1189 if (pages_mapped + pages_iter >= pages_to_map) 1190 wr.wr.send_flags |= xlt_wr_final_send_flags(flags); 1191 1192 wr.offset = idx * desc_size; 1193 wr.xlt_size = sg.length; 1194 1195 err = mlx5_ib_post_send_wait(dev, &wr); 1196 } 1197 sg.length = orig_sg_length; 1198 mlx5_ib_unmap_free_xlt(dev, xlt, &sg); 1199 return err; 1200 } 1201 1202 /* 1203 * Send the DMA list to the HW for a normal MR using UMR. 1204 * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP 1205 * flag may be used. 
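 *
 * With MLX5_IB_UPD_XLT_ZAP the dmabuf MTTs are written without the
 * MLX5_IB_MTT_PRESENT bit, so the mkey stays valid while pointing at no pages
 * until the dma-buf pages are mapped again.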
1206 */ 1207 int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags) 1208 { 1209 struct mlx5_ib_dev *dev = mr_to_mdev(mr); 1210 struct device *ddev = &dev->mdev->pdev->dev; 1211 struct ib_block_iter biter; 1212 struct mlx5_mtt *cur_mtt; 1213 struct mlx5_umr_wr wr; 1214 size_t orig_sg_length; 1215 struct mlx5_mtt *mtt; 1216 size_t final_size; 1217 struct ib_sge sg; 1218 int err = 0; 1219 1220 if (WARN_ON(mr->umem->is_odp)) 1221 return -EINVAL; 1222 1223 mtt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, 1224 ib_umem_num_dma_blocks(mr->umem, 1225 1 << mr->page_shift), 1226 sizeof(*mtt), flags); 1227 if (!mtt) 1228 return -ENOMEM; 1229 orig_sg_length = sg.length; 1230 1231 cur_mtt = mtt; 1232 rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter, 1233 mr->umem->sgt_append.sgt.nents, 1234 BIT(mr->page_shift)) { 1235 if (cur_mtt == (void *)mtt + sg.length) { 1236 dma_sync_single_for_device(ddev, sg.addr, sg.length, 1237 DMA_TO_DEVICE); 1238 err = mlx5_ib_post_send_wait(dev, &wr); 1239 if (err) 1240 goto err; 1241 dma_sync_single_for_cpu(ddev, sg.addr, sg.length, 1242 DMA_TO_DEVICE); 1243 wr.offset += sg.length; 1244 cur_mtt = mtt; 1245 } 1246 1247 cur_mtt->ptag = 1248 cpu_to_be64(rdma_block_iter_dma_address(&biter) | 1249 MLX5_IB_MTT_PRESENT); 1250 1251 if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP)) 1252 cur_mtt->ptag = 0; 1253 1254 cur_mtt++; 1255 } 1256 1257 final_size = (void *)cur_mtt - (void *)mtt; 1258 sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT); 1259 memset(cur_mtt, 0, sg.length - final_size); 1260 wr.wr.send_flags |= xlt_wr_final_send_flags(flags); 1261 wr.xlt_size = sg.length; 1262 1263 dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE); 1264 err = mlx5_ib_post_send_wait(dev, &wr); 1265 1266 err: 1267 sg.length = orig_sg_length; 1268 mlx5_ib_unmap_free_xlt(dev, mtt, &sg); 1269 return err; 1270 } 1271 1272 /* 1273 * If ibmr is NULL it will be allocated by reg_create. 1274 * Else, the given ibmr will be used. 1275 */ 1276 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, 1277 u64 iova, int access_flags, 1278 unsigned int page_size, bool populate) 1279 { 1280 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1281 struct mlx5_ib_mr *mr; 1282 __be64 *pas; 1283 void *mkc; 1284 int inlen; 1285 u32 *in; 1286 int err; 1287 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); 1288 1289 if (!page_size) 1290 return ERR_PTR(-EINVAL); 1291 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1292 if (!mr) 1293 return ERR_PTR(-ENOMEM); 1294 1295 mr->ibmr.pd = pd; 1296 mr->access_flags = access_flags; 1297 mr->page_shift = order_base_2(page_size); 1298 1299 inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1300 if (populate) 1301 inlen += sizeof(*pas) * 1302 roundup(ib_umem_num_dma_blocks(umem, page_size), 2); 1303 in = kvzalloc(inlen, GFP_KERNEL); 1304 if (!in) { 1305 err = -ENOMEM; 1306 goto err_1; 1307 } 1308 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); 1309 if (populate) { 1310 if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) { 1311 err = -EINVAL; 1312 goto err_2; 1313 } 1314 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, 1315 pg_cap ? MLX5_IB_MTT_PRESENT : 0); 1316 } 1317 1318 /* The pg_access bit allows setting the access flags 1319 * in the page list submitted with the command. */ 1320 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); 1321 1322 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1323 set_mkc_access_pd_addr_fields(mkc, access_flags, iova, 1324 populate ? 
pd : dev->umrc.pd); 1325 MLX5_SET(mkc, mkc, free, !populate); 1326 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); 1327 MLX5_SET(mkc, mkc, umr_en, 1); 1328 1329 MLX5_SET64(mkc, mkc, len, umem->length); 1330 MLX5_SET(mkc, mkc, bsf_octword_size, 0); 1331 MLX5_SET(mkc, mkc, translations_octword_size, 1332 get_octo_len(iova, umem->length, mr->page_shift)); 1333 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); 1334 if (populate) { 1335 MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 1336 get_octo_len(iova, umem->length, mr->page_shift)); 1337 } 1338 1339 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 1340 if (err) { 1341 mlx5_ib_warn(dev, "create mkey failed\n"); 1342 goto err_2; 1343 } 1344 mr->mmkey.type = MLX5_MKEY_MR; 1345 mr->umem = umem; 1346 set_mr_fields(dev, mr, umem->length, access_flags, iova); 1347 kvfree(in); 1348 1349 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); 1350 1351 return mr; 1352 1353 err_2: 1354 kvfree(in); 1355 err_1: 1356 kfree(mr); 1357 return ERR_PTR(err); 1358 } 1359 1360 static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr, 1361 u64 length, int acc, int mode) 1362 { 1363 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1364 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1365 struct mlx5_ib_mr *mr; 1366 void *mkc; 1367 u32 *in; 1368 int err; 1369 1370 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1371 if (!mr) 1372 return ERR_PTR(-ENOMEM); 1373 1374 in = kzalloc(inlen, GFP_KERNEL); 1375 if (!in) { 1376 err = -ENOMEM; 1377 goto err_free; 1378 } 1379 1380 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1381 1382 MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3); 1383 MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7); 1384 MLX5_SET64(mkc, mkc, len, length); 1385 set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd); 1386 1387 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 1388 if (err) 1389 goto err_in; 1390 1391 kfree(in); 1392 1393 set_mr_fields(dev, mr, length, acc, start_addr); 1394 1395 return &mr->ibmr; 1396 1397 err_in: 1398 kfree(in); 1399 1400 err_free: 1401 kfree(mr); 1402 1403 return ERR_PTR(err); 1404 } 1405 1406 int mlx5_ib_advise_mr(struct ib_pd *pd, 1407 enum ib_uverbs_advise_mr_advice advice, 1408 u32 flags, 1409 struct ib_sge *sg_list, 1410 u32 num_sge, 1411 struct uverbs_attr_bundle *attrs) 1412 { 1413 if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH && 1414 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && 1415 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT) 1416 return -EOPNOTSUPP; 1417 1418 return mlx5_ib_advise_mr_prefetch(pd, advice, flags, 1419 sg_list, num_sge); 1420 } 1421 1422 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, 1423 struct ib_dm_mr_attr *attr, 1424 struct uverbs_attr_bundle *attrs) 1425 { 1426 struct mlx5_ib_dm *mdm = to_mdm(dm); 1427 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev; 1428 u64 start_addr = mdm->dev_addr + attr->offset; 1429 int mode; 1430 1431 switch (mdm->type) { 1432 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 1433 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS) 1434 return ERR_PTR(-EINVAL); 1435 1436 mode = MLX5_MKC_ACCESS_MODE_MEMIC; 1437 start_addr -= pci_resource_start(dev->pdev, 0); 1438 break; 1439 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 1440 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 1441 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) 1442 return ERR_PTR(-EINVAL); 1443 1444 mode = MLX5_MKC_ACCESS_MODE_SW_ICM; 1445 break; 1446 default: 1447 return ERR_PTR(-EINVAL); 
1448 } 1449 1450 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length, 1451 attr->access_flags, mode); 1452 } 1453 1454 static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem, 1455 u64 iova, int access_flags) 1456 { 1457 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1458 struct mlx5_ib_mr *mr = NULL; 1459 bool xlt_with_umr; 1460 int err; 1461 1462 xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, umem->length); 1463 if (xlt_with_umr) { 1464 mr = alloc_cacheable_mr(pd, umem, iova, access_flags); 1465 } else { 1466 unsigned int page_size = mlx5_umem_find_best_pgsz( 1467 umem, mkc, log_page_size, 0, iova); 1468 1469 mutex_lock(&dev->slow_path_mutex); 1470 mr = reg_create(pd, umem, iova, access_flags, page_size, true); 1471 mutex_unlock(&dev->slow_path_mutex); 1472 } 1473 if (IS_ERR(mr)) { 1474 ib_umem_release(umem); 1475 return ERR_CAST(mr); 1476 } 1477 1478 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); 1479 1480 atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); 1481 1482 if (xlt_with_umr) { 1483 /* 1484 * If the MR was created with reg_create then it will be 1485 * configured properly but left disabled. It is safe to go ahead 1486 * and configure it again via UMR while enabling it. 1487 */ 1488 err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE); 1489 if (err) { 1490 mlx5_ib_dereg_mr(&mr->ibmr, NULL); 1491 return ERR_PTR(err); 1492 } 1493 } 1494 return &mr->ibmr; 1495 } 1496 1497 static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length, 1498 u64 iova, int access_flags, 1499 struct ib_udata *udata) 1500 { 1501 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1502 struct ib_umem_odp *odp; 1503 struct mlx5_ib_mr *mr; 1504 int err; 1505 1506 if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) 1507 return ERR_PTR(-EOPNOTSUPP); 1508 1509 err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq); 1510 if (err) 1511 return ERR_PTR(err); 1512 if (!start && length == U64_MAX) { 1513 if (iova != 0) 1514 return ERR_PTR(-EINVAL); 1515 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) 1516 return ERR_PTR(-EINVAL); 1517 1518 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); 1519 if (IS_ERR(mr)) 1520 return ERR_CAST(mr); 1521 return &mr->ibmr; 1522 } 1523 1524 /* ODP requires xlt update via umr to work. 
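	 *
	 * (Page-fault handling repopulates the translation entries at runtime,
	 * which presumably is only possible when the PAS list can be loaded
	 * through UMR.)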
*/ 1525 if (!mlx5_ib_can_load_pas_with_umr(dev, length)) 1526 return ERR_PTR(-EINVAL); 1527 1528 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, 1529 &mlx5_mn_ops); 1530 if (IS_ERR(odp)) 1531 return ERR_CAST(odp); 1532 1533 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); 1534 if (IS_ERR(mr)) { 1535 ib_umem_release(&odp->umem); 1536 return ERR_CAST(mr); 1537 } 1538 xa_init(&mr->implicit_children); 1539 1540 odp->private = mr; 1541 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); 1542 if (err) 1543 goto err_dereg_mr; 1544 1545 err = mlx5_ib_init_odp_mr(mr); 1546 if (err) 1547 goto err_dereg_mr; 1548 return &mr->ibmr; 1549 1550 err_dereg_mr: 1551 mlx5_ib_dereg_mr(&mr->ibmr, NULL); 1552 return ERR_PTR(err); 1553 } 1554 1555 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 1556 u64 iova, int access_flags, 1557 struct ib_udata *udata) 1558 { 1559 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1560 struct ib_umem *umem; 1561 1562 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) 1563 return ERR_PTR(-EOPNOTSUPP); 1564 1565 mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n", 1566 start, iova, length, access_flags); 1567 1568 if (access_flags & IB_ACCESS_ON_DEMAND) 1569 return create_user_odp_mr(pd, start, length, iova, access_flags, 1570 udata); 1571 umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); 1572 if (IS_ERR(umem)) 1573 return ERR_CAST(umem); 1574 return create_real_mr(pd, umem, iova, access_flags); 1575 } 1576 1577 static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach) 1578 { 1579 struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; 1580 struct mlx5_ib_mr *mr = umem_dmabuf->private; 1581 1582 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); 1583 1584 if (!umem_dmabuf->sgt) 1585 return; 1586 1587 mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); 1588 ib_umem_dmabuf_unmap_pages(umem_dmabuf); 1589 } 1590 1591 static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = { 1592 .allow_peer2peer = 1, 1593 .move_notify = mlx5_ib_dmabuf_invalidate_cb, 1594 }; 1595 1596 struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset, 1597 u64 length, u64 virt_addr, 1598 int fd, int access_flags, 1599 struct ib_udata *udata) 1600 { 1601 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1602 struct mlx5_ib_mr *mr = NULL; 1603 struct ib_umem_dmabuf *umem_dmabuf; 1604 int err; 1605 1606 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || 1607 !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) 1608 return ERR_PTR(-EOPNOTSUPP); 1609 1610 mlx5_ib_dbg(dev, 1611 "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n", 1612 offset, virt_addr, length, fd, access_flags); 1613 1614 /* dmabuf requires xlt update via umr to work. 
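	 *
	 * mlx5_ib_dmabuf_invalidate_cb() zaps the MTTs via
	 * mlx5_ib_update_mr_pas() on move_notify, and reloading them later
	 * depends on UMR as well.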
*/ 1615 if (!mlx5_ib_can_load_pas_with_umr(dev, length)) 1616 return ERR_PTR(-EINVAL); 1617 1618 umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd, 1619 access_flags, 1620 &mlx5_ib_dmabuf_attach_ops); 1621 if (IS_ERR(umem_dmabuf)) { 1622 mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n", 1623 PTR_ERR(umem_dmabuf)); 1624 return ERR_CAST(umem_dmabuf); 1625 } 1626 1627 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, 1628 access_flags); 1629 if (IS_ERR(mr)) { 1630 ib_umem_release(&umem_dmabuf->umem); 1631 return ERR_CAST(mr); 1632 } 1633 1634 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); 1635 1636 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); 1637 umem_dmabuf->private = mr; 1638 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); 1639 if (err) 1640 goto err_dereg_mr; 1641 1642 err = mlx5_ib_init_dmabuf_mr(mr); 1643 if (err) 1644 goto err_dereg_mr; 1645 return &mr->ibmr; 1646 1647 err_dereg_mr: 1648 mlx5_ib_dereg_mr(&mr->ibmr, NULL); 1649 return ERR_PTR(err); 1650 } 1651 1652 /** 1653 * revoke_mr - Fence all DMA on the MR 1654 * @mr: The MR to fence 1655 * 1656 * Upon return the NIC will not be doing any DMA to the pages under the MR, 1657 * and any DMA in progress will be completed. Failure of this function 1658 * indicates the HW has failed catastrophically. 1659 */ 1660 static int revoke_mr(struct mlx5_ib_mr *mr) 1661 { 1662 struct mlx5_umr_wr umrwr = {}; 1663 1664 if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 1665 return 0; 1666 1667 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | 1668 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; 1669 umrwr.wr.opcode = MLX5_IB_WR_UMR; 1670 umrwr.pd = mr_to_mdev(mr)->umrc.pd; 1671 umrwr.mkey = mr->mmkey.key; 1672 umrwr.ignore_free_state = 1; 1673 1674 return mlx5_ib_post_send_wait(mr_to_mdev(mr), &umrwr); 1675 } 1676 1677 /* 1678 * True if the change in access flags can be done via UMR, only some access 1679 * flags can be updated. 
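 *
 * Only IB_ACCESS_LOCAL_WRITE, IB_ACCESS_REMOTE_WRITE, IB_ACCESS_REMOTE_READ
 * and IB_ACCESS_RELAXED_ORDERING may differ; any other change (e.g. remote
 * atomics or on-demand paging) means the MR is recreated instead.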
 */
static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
				     unsigned int current_access_flags,
				     unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
		return false;
	return mlx5_ib_can_reconfig_with_umr(dev, current_access_flags,
					     target_access_flags);
}

static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			       int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	struct mlx5_umr_wr umrwr = {
		.wr = {
			.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS,
			.opcode = MLX5_IB_WR_UMR,
		},
		.mkey = mr->mmkey.key,
		.pd = pd,
		.access_flags = access_flags,
	};
	int err;

	err = mlx5_ib_post_send_wait(dev, &umrwr);
	if (err)
		return err;

	mr->access_flags = access_flags;
	return 0;
}

static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
				  struct ib_umem *new_umem,
				  int new_access_flags, u64 iova,
				  unsigned long *page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);

	/* We only track the allocated sizes of MRs from the cache */
	if (!mr->cache_ent)
		return false;
	if (!mlx5_ib_can_load_pas_with_umr(dev, new_umem->length))
		return false;

	*page_size =
		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
	if (WARN_ON(!*page_size))
		return false;
	return (1ULL << mr->cache_ent->order) >=
	       ib_umem_num_dma_blocks(new_umem, *page_size);
}

static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			 int access_flags, int flags, struct ib_umem *new_umem,
			 u64 iova, unsigned long page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
	struct ib_umem *old_umem = mr->umem;
	int err;

	/*
	 * To keep everything simple the MR is revoked before we start to mess
	 * with it. This ensures the change is atomic relative to any use of
	 * the MR.
	 */
	err = revoke_mr(mr);
	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		mr->ibmr.pd = pd;
		upd_flags |= MLX5_IB_UPD_XLT_PD;
	}
	if (flags & IB_MR_REREG_ACCESS) {
		mr->access_flags = access_flags;
		upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
	}

	mr->ibmr.length = new_umem->length;
	mr->ibmr.iova = iova;
	mr->page_shift = order_base_2(page_size);
	mr->umem = new_umem;
	err = mlx5_ib_update_mr_pas(mr, upd_flags);
	if (err) {
		/*
		 * The MR is revoked at this point so there is no issue to free
		 * new_umem.
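		 *
		 * The caller (mlx5_ib_rereg_user_mr()) releases new_umem when
		 * this function fails, which is why only old_umem is restored
		 * here.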
1776 */ 1777 mr->umem = old_umem; 1778 return err; 1779 } 1780 1781 atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages); 1782 ib_umem_release(old_umem); 1783 atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages); 1784 return 0; 1785 } 1786 1787 struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, 1788 u64 length, u64 iova, int new_access_flags, 1789 struct ib_pd *new_pd, 1790 struct ib_udata *udata) 1791 { 1792 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); 1793 struct mlx5_ib_mr *mr = to_mmr(ib_mr); 1794 int err; 1795 1796 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) 1797 return ERR_PTR(-EOPNOTSUPP); 1798 1799 mlx5_ib_dbg( 1800 dev, 1801 "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n", 1802 start, iova, length, new_access_flags); 1803 1804 if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) 1805 return ERR_PTR(-EOPNOTSUPP); 1806 1807 if (!(flags & IB_MR_REREG_ACCESS)) 1808 new_access_flags = mr->access_flags; 1809 if (!(flags & IB_MR_REREG_PD)) 1810 new_pd = ib_mr->pd; 1811 1812 if (!(flags & IB_MR_REREG_TRANS)) { 1813 struct ib_umem *umem; 1814 1815 /* Fast path for PD/access change */ 1816 if (can_use_umr_rereg_access(dev, mr->access_flags, 1817 new_access_flags)) { 1818 err = umr_rereg_pd_access(mr, new_pd, new_access_flags); 1819 if (err) 1820 return ERR_PTR(err); 1821 return NULL; 1822 } 1823 /* DM or ODP MR's don't have a normal umem so we can't re-use it */ 1824 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) 1825 goto recreate; 1826 1827 /* 1828 * Only one active MR can refer to a umem at one time, revoke 1829 * the old MR before assigning the umem to the new one. 1830 */ 1831 err = revoke_mr(mr); 1832 if (err) 1833 return ERR_PTR(err); 1834 umem = mr->umem; 1835 mr->umem = NULL; 1836 atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); 1837 1838 return create_real_mr(new_pd, umem, mr->ibmr.iova, 1839 new_access_flags); 1840 } 1841 1842 /* 1843 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does 1844 * but the logic around releasing the umem is different 1845 */ 1846 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) 1847 goto recreate; 1848 1849 if (!(new_access_flags & IB_ACCESS_ON_DEMAND) && 1850 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { 1851 struct ib_umem *new_umem; 1852 unsigned long page_size; 1853 1854 new_umem = ib_umem_get(&dev->ib_dev, start, length, 1855 new_access_flags); 1856 if (IS_ERR(new_umem)) 1857 return ERR_CAST(new_umem); 1858 1859 /* Fast path for PAS change */ 1860 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova, 1861 &page_size)) { 1862 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags, 1863 new_umem, iova, page_size); 1864 if (err) { 1865 ib_umem_release(new_umem); 1866 return ERR_PTR(err); 1867 } 1868 return NULL; 1869 } 1870 return create_real_mr(new_pd, new_umem, iova, new_access_flags); 1871 } 1872 1873 /* 1874 * Everything else has no state we can preserve, just create a new MR 1875 * from scratch 1876 */ 1877 recreate: 1878 return mlx5_ib_reg_user_mr(new_pd, start, length, iova, 1879 new_access_flags, udata); 1880 } 1881 1882 static int 1883 mlx5_alloc_priv_descs(struct ib_device *device, 1884 struct mlx5_ib_mr *mr, 1885 int ndescs, 1886 int desc_size) 1887 { 1888 struct mlx5_ib_dev *dev = to_mdev(device); 1889 struct device *ddev = &dev->mdev->pdev->dev; 1890 int size = ndescs * desc_size; 1891 int add_size; 1892 int ret; 1893 1894 add_size = max_t(int, MLX5_UMR_ALIGN - 
ARCH_KMALLOC_MINALIGN, 0); 1895 1896 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); 1897 if (!mr->descs_alloc) 1898 return -ENOMEM; 1899 1900 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); 1901 1902 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); 1903 if (dma_mapping_error(ddev, mr->desc_map)) { 1904 ret = -ENOMEM; 1905 goto err; 1906 } 1907 1908 return 0; 1909 err: 1910 kfree(mr->descs_alloc); 1911 1912 return ret; 1913 } 1914 1915 static void 1916 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) 1917 { 1918 if (!mr->umem && mr->descs) { 1919 struct ib_device *device = mr->ibmr.device; 1920 int size = mr->max_descs * mr->desc_size; 1921 struct mlx5_ib_dev *dev = to_mdev(device); 1922 1923 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, 1924 DMA_TO_DEVICE); 1925 kfree(mr->descs_alloc); 1926 mr->descs = NULL; 1927 } 1928 } 1929 1930 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) 1931 { 1932 struct mlx5_ib_mr *mr = to_mmr(ibmr); 1933 struct mlx5_ib_dev *dev = to_mdev(ibmr->device); 1934 int rc; 1935 1936 /* 1937 * Any async use of the mr must hold the refcount, once the refcount 1938 * goes to zero no other thread, such as ODP page faults, prefetch, any 1939 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it. 1940 */ 1941 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && 1942 refcount_read(&mr->mmkey.usecount) != 0 && 1943 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) 1944 mlx5r_deref_wait_odp_mkey(&mr->mmkey); 1945 1946 if (ibmr->type == IB_MR_TYPE_INTEGRITY) { 1947 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), 1948 mr->sig, NULL, GFP_KERNEL); 1949 1950 if (mr->mtt_mr) { 1951 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); 1952 if (rc) 1953 return rc; 1954 mr->mtt_mr = NULL; 1955 } 1956 if (mr->klm_mr) { 1957 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); 1958 if (rc) 1959 return rc; 1960 mr->klm_mr = NULL; 1961 } 1962 1963 if (mlx5_core_destroy_psv(dev->mdev, 1964 mr->sig->psv_memory.psv_idx)) 1965 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1966 mr->sig->psv_memory.psv_idx); 1967 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) 1968 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1969 mr->sig->psv_wire.psv_idx); 1970 kfree(mr->sig); 1971 mr->sig = NULL; 1972 } 1973 1974 /* Stop DMA */ 1975 if (mr->cache_ent) { 1976 if (revoke_mr(mr)) { 1977 spin_lock_irq(&mr->cache_ent->lock); 1978 mr->cache_ent->total_mrs--; 1979 spin_unlock_irq(&mr->cache_ent->lock); 1980 mr->cache_ent = NULL; 1981 } 1982 } 1983 if (!mr->cache_ent) { 1984 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); 1985 if (rc) 1986 return rc; 1987 } 1988 1989 if (mr->umem) { 1990 bool is_odp = is_odp_mr(mr); 1991 1992 if (!is_odp) 1993 atomic_sub(ib_umem_num_pages(mr->umem), 1994 &dev->mdev->priv.reg_pages); 1995 ib_umem_release(mr->umem); 1996 if (is_odp) 1997 mlx5_ib_free_odp_mr(mr); 1998 } 1999 2000 if (mr->cache_ent) { 2001 mlx5_mr_cache_free(dev, mr); 2002 } else { 2003 mlx5_free_priv_descs(mr); 2004 kfree(mr); 2005 } 2006 return 0; 2007 } 2008 2009 static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs, 2010 int access_mode, int page_shift) 2011 { 2012 void *mkc; 2013 2014 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 2015 2016 /* This is only used from the kernel, so setting the PD is OK. 
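	 *
	 * The mkey is created in the free state with umr_en set, so it only
	 * becomes usable after a later UMR configures and enables it.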
*/ 2017 set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd); 2018 MLX5_SET(mkc, mkc, free, 1); 2019 MLX5_SET(mkc, mkc, translations_octword_size, ndescs); 2020 MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3); 2021 MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7); 2022 MLX5_SET(mkc, mkc, umr_en, 1); 2023 MLX5_SET(mkc, mkc, log_page_size, page_shift); 2024 } 2025 2026 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 2027 int ndescs, int desc_size, int page_shift, 2028 int access_mode, u32 *in, int inlen) 2029 { 2030 struct mlx5_ib_dev *dev = to_mdev(pd->device); 2031 int err; 2032 2033 mr->access_mode = access_mode; 2034 mr->desc_size = desc_size; 2035 mr->max_descs = ndescs; 2036 2037 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); 2038 if (err) 2039 return err; 2040 2041 mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift); 2042 2043 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 2044 if (err) 2045 goto err_free_descs; 2046 2047 mr->mmkey.type = MLX5_MKEY_MR; 2048 mr->ibmr.lkey = mr->mmkey.key; 2049 mr->ibmr.rkey = mr->mmkey.key; 2050 2051 return 0; 2052 2053 err_free_descs: 2054 mlx5_free_priv_descs(mr); 2055 return err; 2056 } 2057 2058 static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, 2059 u32 max_num_sg, u32 max_num_meta_sg, 2060 int desc_size, int access_mode) 2061 { 2062 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 2063 int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4); 2064 int page_shift = 0; 2065 struct mlx5_ib_mr *mr; 2066 u32 *in; 2067 int err; 2068 2069 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 2070 if (!mr) 2071 return ERR_PTR(-ENOMEM); 2072 2073 mr->ibmr.pd = pd; 2074 mr->ibmr.device = pd->device; 2075 2076 in = kzalloc(inlen, GFP_KERNEL); 2077 if (!in) { 2078 err = -ENOMEM; 2079 goto err_free; 2080 } 2081 2082 if (access_mode == MLX5_MKC_ACCESS_MODE_MTT) 2083 page_shift = PAGE_SHIFT; 2084 2085 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, 2086 access_mode, in, inlen); 2087 if (err) 2088 goto err_free_in; 2089 2090 mr->umem = NULL; 2091 kfree(in); 2092 2093 return mr; 2094 2095 err_free_in: 2096 kfree(in); 2097 err_free: 2098 kfree(mr); 2099 return ERR_PTR(err); 2100 } 2101 2102 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 2103 int ndescs, u32 *in, int inlen) 2104 { 2105 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), 2106 PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in, 2107 inlen); 2108 } 2109 2110 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 2111 int ndescs, u32 *in, int inlen) 2112 { 2113 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), 2114 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen); 2115 } 2116 2117 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, 2118 int max_num_sg, int max_num_meta_sg, 2119 u32 *in, int inlen) 2120 { 2121 struct mlx5_ib_dev *dev = to_mdev(pd->device); 2122 u32 psv_index[2]; 2123 void *mkc; 2124 int err; 2125 2126 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); 2127 if (!mr->sig) 2128 return -ENOMEM; 2129 2130 /* create mem & wire PSVs */ 2131 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); 2132 if (err) 2133 goto err_free_sig; 2134 2135 mr->sig->psv_memory.psv_idx = psv_index[0]; 2136 mr->sig->psv_wire.psv_idx = psv_index[1]; 2137 2138 mr->sig->sig_status_checked = true; 2139 mr->sig->sig_err_exists = false; 2140 /* Next UMR, Arm SIGERR */ 2141 
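/*
 * An integrity (signature) MR is built from several pieces: a pair of PSV
 * objects for the memory and wire domains, plus two internal PI MRs (one
 * KLM based, one MTT based) that are used to map the data and metadata
 * scatterlists. The parent mkey is created with BSF enabled so it can carry
 * the signature context.
 */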
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			      mr->sig, GFP_KERNEL));
	if (err)
		goto err_free_descs;
	return 0;

err_free_descs:
	destroy_mkey(dev, mr);
	mlx5_free_priv_descs(mr);
err_free_mtt_mr:
	mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);

	return err;
}

static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}

int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = to_mmw(ibmw);
	unsigned int ndescs;
	u32 *in = NULL;
	void *mkc;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return -EOPNOTSUPP;

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	ibmw->rkey = mw->mmkey.key;
	mw->mmkey.ndescs = ndescs;

	resp.response_length =
		min(offsetofend(typeof(resp), response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto free_mkey;
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
		if (err)
			goto free_mkey;
	}

	kfree(in);
	return 0;

free_mkey:
	mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
	kfree(in);
	return err;
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_dev *dev = to_mdev(mw->device);
	struct mlx5_ib_mw *mmw = to_mmw(mw);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
		/*
		 * pagefault_single_data_segment() may be accessing mmw
		 * if the user bound an ODP MR to this MW.
		 */
		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);

	return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}

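/*
 * Report the status of a signature enabled MR. Only IB_MR_CHECK_SIG_STATUS
 * is supported: if a signature error was latched for this MR it is copied
 * into @mr_status and the pending error is cleared, so each error is
 * reported to the caller exactly once.
 */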
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->mmkey.ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->mmkey.ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}

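/*
 * mlx5_set_page() and mlx5_set_page_pi() are the ib_sg_to_pages() callbacks
 * used below: each call appends one page address to the MTT descriptor
 * array, with the read/write enable bits set, until max_descs is reached.
 */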
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->mmkey.ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

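/*
 * When both the data and the metadata fit into MTT descriptors, the PI
 * address programmed into the HW is expressed relative to the first data
 * page. For example (illustrative numbers only): with a 4K page size, data
 * at iova 0x10200 covering three pages and metadata starting at offset
 * 0x80 into its first page, the resulting pi_iova is
 * 0x10000 + 3 * 0x1000 + 0x80 = 0x13080.
 */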
static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->mmkey.ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * PI address for the HW is the offset of the metadata address
		 * relative to the first data page address. It equals the
		 * first data page address + size of data pages + metadata
		 * offset at the first metadata page.
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->mmkey.ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we also
		 * register the gaps between the end of the data and the start
		 * of the metadata (the sig MR will verify that the HW accesses
		 * the right addresses). This mapping is safe because we use an
		 * internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}

static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->mmkey.ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is a zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}

int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->mmkey.ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform a UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fall back to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to
	 * MTT descriptors and fall back to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially under high load). Use KLM (indirect access) only if it
	 * is mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is a zero-based memory region */
	ibmr->iova = 0;
	mr->pi_mr = pi_mr;
	if (pi_mr)
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	else
		ibmr->sig_attrs->meta_length = mr->meta_length;

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->mmkey.ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}