1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2020, Intel Corporation. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem_odp.h>
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate);

/*
 * Fill the common access-rights, PD and start-address fields of an mkey
 * context (mkc). Relaxed-ordering bits are only set when the corresponding
 * device capabilities are present.
 */
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1); /* local read is always granted */

	if (acc & IB_ACCESS_RELAXED_ORDERING) {
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
			MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);

		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
		    (MLX5_CAP_GEN(dev->mdev,
				  relaxed_ordering_read_pci_enabled) &&
		     pcie_relaxed_ordering_enabled(dev->mdev->pdev)))
			MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
	}

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}

/*
 * Stamp a rolling 8-bit variant into mkey_7_0 so that a reused mkey index
 * yields a different key value, and report it back through *mkey.
 */
static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	*mkey = key;
}

/*
 * Synchronously create an mkey from the prepared create_mkey_in box.
 * On success the mkey's wait queue is initialized.
 */
static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
			       struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
	int ret;

	assign_mkey_variant(dev, &mkey->key, in);
	ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
	if (!ret)
		init_waitqueue_head(&mkey->wait);

	return ret;
}

/*
 * Issue an asynchronous CREATE_MKEY command; completion is delivered to
 * create_mkey_callback() via the device async command context.
 */
static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
{
	struct mlx5_ib_dev *dev = async_create->ent->dev;
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);

	MLX5_SET(create_mkey_in, async_create->in, opcode,
		 MLX5_CMD_OP_CREATE_MKEY);
	assign_mkey_variant(dev, &async_create->mkey, async_create->in);
	return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
				async_create->out, outlen, create_mkey_callback,
				&async_create->cb_work);
}

static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

/*
 * Destroy an MR's mkey in FW. The mkey must already have been removed from
 * the ODP mkeys xarray by the caller.
 */
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

	return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}

/* Log details of a failed asynchronous mkey creation. */
static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
{
	if (status == -ENXIO) /* core driver is not available */
		return;

	mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
	if (status != -EREMOTEIO) /* driver specific failure */
		return;

	/* Failed in FW, print cmd out failure details */
	mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}

/*
 * Reserve the next slot (index ent->reserved) in the mkeys xarray, optionally
 * storing @to_store into the cache. Called with ent->mkeys locked; the lock
 * is dropped around xas_nomem() memory allocation and retaken before return.
 * Returns -EAGAIN when @limit_pendings is set and too many creations are
 * already outstanding.
 */
static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings,
			    void *to_store)
{
	XA_STATE(xas, &ent->mkeys, 0);
	void *curr;

	if (limit_pendings &&
	    (ent->reserved - ent->stored) > MAX_PENDING_REG_MR)
		return -EAGAIN;

	while (1) {
		/*
		 * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
		 * doesn't transparently unlock. Instead we set the xas index to
		 * the current value of reserved every iteration.
		 */
		xas_set(&xas, ent->reserved);
		curr = xas_load(&xas);
		if (!curr) {
			if (to_store && ent->stored == ent->reserved)
				xas_store(&xas, to_store);
			else
				xas_store(&xas, XA_ZERO_ENTRY);
			if (xas_valid(&xas)) {
				ent->reserved++;
				if (to_store) {
					if (ent->stored != ent->reserved)
						__xa_store(&ent->mkeys,
							   ent->stored,
							   to_store,
							   GFP_KERNEL);
					ent->stored++;
					queue_adjust_cache_locked(ent);
					WRITE_ONCE(ent->dev->cache.last_add,
						   jiffies);
				}
			}
		}
		xa_unlock_irq(&ent->mkeys);

		/*
		 * Notice xas_nomem() must always be called as it cleans
		 * up any cached allocation.
		 */
		if (!xas_nomem(&xas, GFP_KERNEL))
			break;
		xa_lock_irq(&ent->mkeys);
	}
	xa_lock_irq(&ent->mkeys);
	if (xas_error(&xas))
		return xas_error(&xas);
	if (WARN_ON(curr))
		return -EINVAL;
	return 0;
}

/* Locked wrapper around push_mkey_locked(). */
static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
		     void *to_store)
{
	int ret;

	xa_lock_irq(&ent->mkeys);
	ret = push_mkey_locked(ent, limit_pendings, to_store);
	xa_unlock_irq(&ent->mkeys);
	return ret;
}

/*
 * Release the most recent reservation made by push_mkey(); the slot must
 * still hold the reservation marker (erase must return NULL).
 * Caller holds ent->mkeys lock.
 */
static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
{
	void *old;

	ent->reserved--;
	old = __xa_erase(&ent->mkeys, ent->reserved);
	WARN_ON(old);
}

/*
 * Convert the oldest reserved slot into a stored mkey. GFP flags are 0
 * because the slot was already allocated by the reservation.
 * Caller holds ent->mkeys lock.
 */
static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
{
	void *old;

	old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
	WARN_ON(old);
	ent->stored++;
}

/*
 * Remove and return the newest stored mkey. If reservations exist beyond the
 * stored range, the vacated stored slot is turned back into a reservation
 * marker and the top reserved slot is erased instead.
 * Caller holds ent->mkeys lock.
 */
static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
{
	void *old, *xa_mkey;

	ent->stored--;
	ent->reserved--;

	if (ent->stored == ent->reserved) {
		xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
		WARN_ON(!xa_mkey);
		return (u32)xa_to_value(xa_mkey);
	}

	xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
			     GFP_KERNEL);
	WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
	old = __xa_erase(&ent->mkeys, ent->reserved);
	WARN_ON(old);
	return (u32)xa_to_value(xa_mkey);
}

/*
 * Completion handler for asynchronous mkey creation. On failure the
 * reservation is undone and further filling is delayed (fill_delay is
 * cleared by delay_time_func() one HZ later); on success the new mkey is
 * pushed into the cache entry.
 */
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5r_async_create_mkey *mkey_out =
		container_of(context, struct mlx5r_async_create_mkey, cb_work);
	struct mlx5_cache_ent *ent = mkey_out->ent;
	struct mlx5_ib_dev *dev = ent->dev;
	unsigned long flags;

	if (status) {
		create_mkey_warn(dev, status, mkey_out->out);
		kfree(mkey_out);
		xa_lock_irqsave(&ent->mkeys, flags);
		undo_push_reserve_mkey(ent);
		WRITE_ONCE(dev->fill_delay, 1);
		xa_unlock_irqrestore(&ent->mkeys, flags);
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mkey_out->mkey |= mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
	WRITE_ONCE(dev->cache.last_add, jiffies);

	xa_lock_irqsave(&ent->mkeys, flags);
	push_to_reserved(ent, mkey_out->mkey);
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	xa_unlock_irqrestore(&ent->mkeys, flags);
	kfree(mkey_out);
}

/*
 * Number of octowords of translation needed for @ndescs descriptors of the
 * given access mode (MTT vs KSM descriptor sizes differ).
 */
static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
{
	int ret = 0;

	switch (access_mode) {
	case MLX5_MKC_ACCESS_MODE_MTT:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
				   sizeof(struct mlx5_mtt));
		break;
	case MLX5_MKC_ACCESS_MODE_KSM:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
				   sizeof(struct mlx5_klm));
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}

/*
 * Prepare the mkc of a cacheable mkey: free + umr_en, access mode and
 * translation size taken from the cache entry's rb_key.
 */
static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (ent->rb_key.access_mode >> 2) & 0x7);

	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_mkc_octo_size(ent->rb_key.access_mode,
				   ent->rb_key.ndescs));
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
}

/* Asynchronously schedule new MRs to be populated in the cache.
 */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
	struct mlx5r_async_create_mkey *async_create;
	void *mkc;
	int err = 0;
	int i;

	for (i = 0; i < num; i++) {
		async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
				       GFP_KERNEL);
		if (!async_create)
			return -ENOMEM;
		mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
				   memory_key_mkey_entry);
		set_cache_mkc(ent, mkc);
		async_create->ent = ent;

		/* Reserve a slot before launching the async creation. */
		err = push_mkey(ent, true, NULL);
		if (err)
			goto free_async_create;

		err = mlx5_ib_create_mkey_cb(async_create);
		if (err) {
			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
			goto err_undo_reserve;
		}
	}

	return 0;

err_undo_reserve:
	xa_lock_irq(&ent->mkeys);
	undo_push_reserve_mkey(ent);
	xa_unlock_irq(&ent->mkeys);
free_async_create:
	kfree(async_create);
	return err;
}

/* Synchronously create a MR in the cache */
static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_cache_mkc(ent, mkc);

	err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
	if (err)
		goto free_in;

	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
free_in:
	kfree(in);
	return err;
}

/*
 * Pop one stored mkey and destroy it in FW. The xa lock is dropped around
 * the (sleeping) destroy command and retaken before return.
 */
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
	u32 mkey;

	lockdep_assert_held(&ent->mkeys.xa_lock);
	if (!ent->stored)
		return;
	mkey = pop_stored_mkey(ent);
	xa_unlock_irq(&ent->mkeys);
	mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
	xa_lock_irq(&ent->mkeys);
}

/*
 * Grow or shrink the entry until ent->reserved reaches @target (or
 * 2 * ent->limit when @limit_fill). The xa lock is dropped while adding
 * keys; -EAGAIN from add_keys() is retried after a short sleep.
 */
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
	__acquires(&ent->mkeys) __releases(&ent->mkeys)
{
	int err;

	lockdep_assert_held(&ent->mkeys.xa_lock);

	while (true) {
		if (limit_fill)
			target = ent->limit * 2;
		if (target == ent->reserved)
			return 0;
		if (target > ent->reserved) {
			u32 todo = target - ent->reserved;

			xa_unlock_irq(&ent->mkeys);
			err = add_keys(ent, todo);
			if (err == -EAGAIN)
				usleep_range(3000, 5000);
			xa_lock_irq(&ent->mkeys);
			if (err) {
				if (err != -EAGAIN)
					return err;
			} else
				return 0;
		} else {
			remove_cache_mr_locked(ent);
		}
	}
}

/* debugfs "size" write handler: resize the entry to the requested total. */
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 target;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &target);
	if (err)
		return err;

	/*
	 * Target is the new value of total_mrs the user requests, however we
	 * cannot free MRs that are in use. Compute the target value for stored
	 * mkeys.
	 */
	xa_lock_irq(&ent->mkeys);
	if (target < ent->in_use) {
		err = -EINVAL;
		goto err_unlock;
	}
	target = target - ent->in_use;
	if (target < ent->limit || target > ent->limit*2) {
		err = -EINVAL;
		goto err_unlock;
	}
	err = resize_available_mrs(ent, target, false);
	if (err)
		goto err_unlock;
	xa_unlock_irq(&ent->mkeys);

	return count;

err_unlock:
	xa_unlock_irq(&ent->mkeys);
	return err;
}

/* debugfs "size" read handler: reports stored + in-use mkeys. */
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

/* debugfs "limit" write handler: set the low-water mark and refill. */
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 var;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/*
	 * Upon set we immediately fill the cache to high water mark implied by
	 * the limit.
	 */
	xa_lock_irq(&ent->mkeys);
	ent->limit = var;
	err = resize_available_mrs(ent, 0, true);
	xa_unlock_irq(&ent->mkeys);
	if (err)
		return err;
	return count;
}

/* debugfs "limit" read handler. */
static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

/* True if any cache entry is currently below its low-water mark. */
static bool someone_adding(struct mlx5_mkey_cache *cache)
{
	struct mlx5_cache_ent *ent;
	struct rb_node *node;
	bool ret;

	mutex_lock(&cache->rb_lock);
	for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
		ret = ent->stored < ent->limit;
		xa_unlock_irq(&ent->mkeys);
		if (ret) {
			mutex_unlock(&cache->rb_lock);
			return true;
		}
	}
	mutex_unlock(&cache->rb_lock);
	return false;
}

/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis, once the low water mark is hit it is
 * refilled up to the high mark.
555 */ 556 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) 557 { 558 lockdep_assert_held(&ent->mkeys.xa_lock); 559 560 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) 561 return; 562 if (ent->stored < ent->limit) { 563 ent->fill_to_high_water = true; 564 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); 565 } else if (ent->fill_to_high_water && 566 ent->reserved < 2 * ent->limit) { 567 /* 568 * Once we start populating due to hitting a low water mark 569 * continue until we pass the high water mark. 570 */ 571 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); 572 } else if (ent->stored == 2 * ent->limit) { 573 ent->fill_to_high_water = false; 574 } else if (ent->stored > 2 * ent->limit) { 575 /* Queue deletion of excess entries */ 576 ent->fill_to_high_water = false; 577 if (ent->stored != ent->reserved) 578 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, 579 msecs_to_jiffies(1000)); 580 else 581 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); 582 } 583 } 584 585 static void __cache_work_func(struct mlx5_cache_ent *ent) 586 { 587 struct mlx5_ib_dev *dev = ent->dev; 588 struct mlx5_mkey_cache *cache = &dev->cache; 589 int err; 590 591 xa_lock_irq(&ent->mkeys); 592 if (ent->disabled) 593 goto out; 594 595 if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit && 596 !READ_ONCE(dev->fill_delay)) { 597 xa_unlock_irq(&ent->mkeys); 598 err = add_keys(ent, 1); 599 xa_lock_irq(&ent->mkeys); 600 if (ent->disabled) 601 goto out; 602 if (err) { 603 /* 604 * EAGAIN only happens if there are pending MRs, so we 605 * will be rescheduled when storing them. The only 606 * failure path here is ENOMEM. 
607 */ 608 if (err != -EAGAIN) { 609 mlx5_ib_warn( 610 dev, 611 "add keys command failed, err %d\n", 612 err); 613 queue_delayed_work(cache->wq, &ent->dwork, 614 msecs_to_jiffies(1000)); 615 } 616 } 617 } else if (ent->stored > 2 * ent->limit) { 618 bool need_delay; 619 620 /* 621 * The remove_cache_mr() logic is performed as garbage 622 * collection task. Such task is intended to be run when no 623 * other active processes are running. 624 * 625 * The need_resched() will return TRUE if there are user tasks 626 * to be activated in near future. 627 * 628 * In such case, we don't execute remove_cache_mr() and postpone 629 * the garbage collection work to try to run in next cycle, in 630 * order to free CPU resources to other tasks. 631 */ 632 xa_unlock_irq(&ent->mkeys); 633 need_delay = need_resched() || someone_adding(cache) || 634 !time_after(jiffies, 635 READ_ONCE(cache->last_add) + 300 * HZ); 636 xa_lock_irq(&ent->mkeys); 637 if (ent->disabled) 638 goto out; 639 if (need_delay) { 640 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); 641 goto out; 642 } 643 remove_cache_mr_locked(ent); 644 queue_adjust_cache_locked(ent); 645 } 646 out: 647 xa_unlock_irq(&ent->mkeys); 648 } 649 650 static void delayed_cache_work_func(struct work_struct *work) 651 { 652 struct mlx5_cache_ent *ent; 653 654 ent = container_of(work, struct mlx5_cache_ent, dwork.work); 655 __cache_work_func(ent); 656 } 657 658 static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1, 659 struct mlx5r_cache_rb_key key2) 660 { 661 int res; 662 663 res = key1.ats - key2.ats; 664 if (res) 665 return res; 666 667 res = key1.access_mode - key2.access_mode; 668 if (res) 669 return res; 670 671 res = key1.access_flags - key2.access_flags; 672 if (res) 673 return res; 674 675 /* 676 * keep ndescs the last in the compare table since the find function 677 * searches for an exact match on all properties and only closest 678 * match in size. 
679 */ 680 return key1.ndescs - key2.ndescs; 681 } 682 683 static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache, 684 struct mlx5_cache_ent *ent) 685 { 686 struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL; 687 struct mlx5_cache_ent *cur; 688 int cmp; 689 690 /* Figure out where to put new node */ 691 while (*new) { 692 cur = rb_entry(*new, struct mlx5_cache_ent, node); 693 parent = *new; 694 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); 695 if (cmp > 0) 696 new = &((*new)->rb_left); 697 if (cmp < 0) 698 new = &((*new)->rb_right); 699 if (cmp == 0) { 700 mutex_unlock(&cache->rb_lock); 701 return -EEXIST; 702 } 703 } 704 705 /* Add new node and rebalance tree. */ 706 rb_link_node(&ent->node, parent, new); 707 rb_insert_color(&ent->node, &cache->rb_root); 708 709 return 0; 710 } 711 712 static struct mlx5_cache_ent * 713 mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, 714 struct mlx5r_cache_rb_key rb_key) 715 { 716 struct rb_node *node = dev->cache.rb_root.rb_node; 717 struct mlx5_cache_ent *cur, *smallest = NULL; 718 int cmp; 719 720 /* 721 * Find the smallest ent with order >= requested_order. 722 */ 723 while (node) { 724 cur = rb_entry(node, struct mlx5_cache_ent, node); 725 cmp = cache_ent_key_cmp(cur->rb_key, rb_key); 726 if (cmp > 0) { 727 smallest = cur; 728 node = node->rb_left; 729 } 730 if (cmp < 0) 731 node = node->rb_right; 732 if (cmp == 0) 733 return cur; 734 } 735 736 return (smallest && 737 smallest->rb_key.access_mode == rb_key.access_mode && 738 smallest->rb_key.access_flags == rb_key.access_flags && 739 smallest->rb_key.ats == rb_key.ats) ? 
740 smallest : 741 NULL; 742 } 743 744 static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, 745 struct mlx5_cache_ent *ent, 746 int access_flags) 747 { 748 struct mlx5_ib_mr *mr; 749 int err; 750 751 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 752 if (!mr) 753 return ERR_PTR(-ENOMEM); 754 755 xa_lock_irq(&ent->mkeys); 756 ent->in_use++; 757 758 if (!ent->stored) { 759 queue_adjust_cache_locked(ent); 760 ent->miss++; 761 xa_unlock_irq(&ent->mkeys); 762 err = create_cache_mkey(ent, &mr->mmkey.key); 763 if (err) { 764 xa_lock_irq(&ent->mkeys); 765 ent->in_use--; 766 xa_unlock_irq(&ent->mkeys); 767 kfree(mr); 768 return ERR_PTR(err); 769 } 770 } else { 771 mr->mmkey.key = pop_stored_mkey(ent); 772 queue_adjust_cache_locked(ent); 773 xa_unlock_irq(&ent->mkeys); 774 } 775 mr->mmkey.cache_ent = ent; 776 mr->mmkey.type = MLX5_MKEY_MR; 777 init_waitqueue_head(&mr->mmkey.wait); 778 return mr; 779 } 780 781 static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev, 782 int access_flags) 783 { 784 int ret = 0; 785 786 if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) && 787 MLX5_CAP_GEN(dev->mdev, atomic) && 788 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) 789 ret |= IB_ACCESS_REMOTE_ATOMIC; 790 791 if ((access_flags & IB_ACCESS_RELAXED_ORDERING) && 792 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && 793 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) 794 ret |= IB_ACCESS_RELAXED_ORDERING; 795 796 if ((access_flags & IB_ACCESS_RELAXED_ORDERING) && 797 (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || 798 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) && 799 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) 800 ret |= IB_ACCESS_RELAXED_ORDERING; 801 802 return ret; 803 } 804 805 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, 806 int access_flags, int access_mode, 807 int ndescs) 808 { 809 struct mlx5r_cache_rb_key rb_key = { 810 .ndescs = ndescs, 811 .access_mode = access_mode, 812 .access_flags = 
get_unchangeable_access_flags(dev, access_flags) 813 }; 814 struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key); 815 816 if (!ent) 817 return ERR_PTR(-EOPNOTSUPP); 818 819 return _mlx5_mr_cache_alloc(dev, ent, access_flags); 820 } 821 822 static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) 823 { 824 u32 mkey; 825 826 cancel_delayed_work(&ent->dwork); 827 xa_lock_irq(&ent->mkeys); 828 while (ent->stored) { 829 mkey = pop_stored_mkey(ent); 830 xa_unlock_irq(&ent->mkeys); 831 mlx5_core_destroy_mkey(dev->mdev, mkey); 832 xa_lock_irq(&ent->mkeys); 833 } 834 xa_unlock_irq(&ent->mkeys); 835 } 836 837 static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) 838 { 839 if (!mlx5_debugfs_root || dev->is_rep) 840 return; 841 842 debugfs_remove_recursive(dev->cache.fs_root); 843 dev->cache.fs_root = NULL; 844 } 845 846 static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev, 847 struct mlx5_cache_ent *ent) 848 { 849 int order = order_base_2(ent->rb_key.ndescs); 850 struct dentry *dir; 851 852 if (!mlx5_debugfs_root || dev->is_rep) 853 return; 854 855 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) 856 order = MLX5_IMR_KSM_CACHE_ENTRY + 2; 857 858 sprintf(ent->name, "%d", order); 859 dir = debugfs_create_dir(ent->name, dev->cache.fs_root); 860 debugfs_create_file("size", 0600, dir, ent, &size_fops); 861 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); 862 debugfs_create_ulong("cur", 0400, dir, &ent->stored); 863 debugfs_create_u32("miss", 0600, dir, &ent->miss); 864 } 865 866 static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev) 867 { 868 struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev); 869 struct mlx5_mkey_cache *cache = &dev->cache; 870 871 if (!mlx5_debugfs_root || dev->is_rep) 872 return; 873 874 cache->fs_root = debugfs_create_dir("mr_cache", dbg_root); 875 } 876 877 static void delay_time_func(struct timer_list *t) 878 { 879 struct mlx5_ib_dev *dev = 
from_timer(dev, t, delay_timer); 880 881 WRITE_ONCE(dev->fill_delay, 0); 882 } 883 884 struct mlx5_cache_ent * 885 mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, 886 struct mlx5r_cache_rb_key rb_key, 887 bool persistent_entry) 888 { 889 struct mlx5_cache_ent *ent; 890 int order; 891 int ret; 892 893 ent = kzalloc(sizeof(*ent), GFP_KERNEL); 894 if (!ent) 895 return ERR_PTR(-ENOMEM); 896 897 xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ); 898 ent->rb_key = rb_key; 899 ent->dev = dev; 900 ent->is_tmp = !persistent_entry; 901 902 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); 903 904 ret = mlx5_cache_ent_insert(&dev->cache, ent); 905 if (ret) { 906 kfree(ent); 907 return ERR_PTR(ret); 908 } 909 910 if (persistent_entry) { 911 if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) 912 order = MLX5_IMR_KSM_CACHE_ENTRY; 913 else 914 order = order_base_2(rb_key.ndescs) - 2; 915 916 if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && 917 !dev->is_rep && mlx5_core_is_pf(dev->mdev) && 918 mlx5r_umr_can_load_pas(dev, 0)) 919 ent->limit = dev->mdev->profile.mr_cache[order].limit; 920 else 921 ent->limit = 0; 922 923 mlx5_mkey_cache_debugfs_add_ent(dev, ent); 924 } else { 925 mod_delayed_work(ent->dev->cache.wq, 926 &ent->dev->cache.remove_ent_dwork, 927 msecs_to_jiffies(30 * 1000)); 928 } 929 930 return ent; 931 } 932 933 static void remove_ent_work_func(struct work_struct *work) 934 { 935 struct mlx5_mkey_cache *cache; 936 struct mlx5_cache_ent *ent; 937 struct rb_node *cur; 938 939 cache = container_of(work, struct mlx5_mkey_cache, 940 remove_ent_dwork.work); 941 mutex_lock(&cache->rb_lock); 942 cur = rb_last(&cache->rb_root); 943 while (cur) { 944 ent = rb_entry(cur, struct mlx5_cache_ent, node); 945 cur = rb_prev(cur); 946 mutex_unlock(&cache->rb_lock); 947 948 xa_lock_irq(&ent->mkeys); 949 if (!ent->is_tmp) { 950 xa_unlock_irq(&ent->mkeys); 951 mutex_lock(&cache->rb_lock); 952 continue; 953 } 954 xa_unlock_irq(&ent->mkeys); 955 956 clean_keys(ent->dev, 
ent); 957 mutex_lock(&cache->rb_lock); 958 } 959 mutex_unlock(&cache->rb_lock); 960 } 961 962 int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) 963 { 964 struct mlx5_mkey_cache *cache = &dev->cache; 965 struct rb_root *root = &dev->cache.rb_root; 966 struct mlx5r_cache_rb_key rb_key = { 967 .access_mode = MLX5_MKC_ACCESS_MODE_MTT, 968 }; 969 struct mlx5_cache_ent *ent; 970 struct rb_node *node; 971 int ret; 972 int i; 973 974 mutex_init(&dev->slow_path_mutex); 975 mutex_init(&dev->cache.rb_lock); 976 dev->cache.rb_root = RB_ROOT; 977 INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func); 978 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 979 if (!cache->wq) { 980 mlx5_ib_warn(dev, "failed to create work queue\n"); 981 return -ENOMEM; 982 } 983 984 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); 985 timer_setup(&dev->delay_timer, delay_time_func, 0); 986 mlx5_mkey_cache_debugfs_init(dev); 987 mutex_lock(&cache->rb_lock); 988 for (i = 0; i <= mkey_cache_max_order(dev); i++) { 989 rb_key.ndescs = 1 << (i + 2); 990 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); 991 if (IS_ERR(ent)) { 992 ret = PTR_ERR(ent); 993 goto err; 994 } 995 } 996 997 ret = mlx5_odp_init_mkey_cache(dev); 998 if (ret) 999 goto err; 1000 1001 mutex_unlock(&cache->rb_lock); 1002 for (node = rb_first(root); node; node = rb_next(node)) { 1003 ent = rb_entry(node, struct mlx5_cache_ent, node); 1004 xa_lock_irq(&ent->mkeys); 1005 queue_adjust_cache_locked(ent); 1006 xa_unlock_irq(&ent->mkeys); 1007 } 1008 1009 return 0; 1010 1011 err: 1012 mutex_unlock(&cache->rb_lock); 1013 mlx5_mkey_cache_debugfs_cleanup(dev); 1014 mlx5_ib_warn(dev, "failed to create mkey cache entry\n"); 1015 return ret; 1016 } 1017 1018 void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) 1019 { 1020 struct rb_root *root = &dev->cache.rb_root; 1021 struct mlx5_cache_ent *ent; 1022 struct rb_node *node; 1023 1024 if (!dev->cache.wq) 1025 return; 1026 1027 
cancel_delayed_work_sync(&dev->cache.remove_ent_dwork); 1028 mutex_lock(&dev->cache.rb_lock); 1029 for (node = rb_first(root); node; node = rb_next(node)) { 1030 ent = rb_entry(node, struct mlx5_cache_ent, node); 1031 xa_lock_irq(&ent->mkeys); 1032 ent->disabled = true; 1033 xa_unlock_irq(&ent->mkeys); 1034 cancel_delayed_work_sync(&ent->dwork); 1035 } 1036 1037 mlx5_mkey_cache_debugfs_cleanup(dev); 1038 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); 1039 1040 node = rb_first(root); 1041 while (node) { 1042 ent = rb_entry(node, struct mlx5_cache_ent, node); 1043 node = rb_next(node); 1044 clean_keys(dev, ent); 1045 rb_erase(&ent->node, root); 1046 kfree(ent); 1047 } 1048 mutex_unlock(&dev->cache.rb_lock); 1049 1050 destroy_workqueue(dev->cache.wq); 1051 del_timer_sync(&dev->delay_timer); 1052 } 1053 1054 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) 1055 { 1056 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1057 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1058 struct mlx5_ib_mr *mr; 1059 void *mkc; 1060 u32 *in; 1061 int err; 1062 1063 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1064 if (!mr) 1065 return ERR_PTR(-ENOMEM); 1066 1067 in = kzalloc(inlen, GFP_KERNEL); 1068 if (!in) { 1069 err = -ENOMEM; 1070 goto err_free; 1071 } 1072 1073 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1074 1075 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); 1076 MLX5_SET(mkc, mkc, length64, 1); 1077 set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0, 1078 pd); 1079 1080 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); 1081 if (err) 1082 goto err_in; 1083 1084 kfree(in); 1085 mr->mmkey.type = MLX5_MKEY_MR; 1086 mr->ibmr.lkey = mr->mmkey.key; 1087 mr->ibmr.rkey = mr->mmkey.key; 1088 mr->umem = NULL; 1089 1090 return &mr->ibmr; 1091 1092 err_in: 1093 kfree(in); 1094 1095 err_free: 1096 kfree(mr); 1097 1098 return ERR_PTR(err); 1099 } 1100 1101 static int get_octo_len(u64 addr, u64 len, int page_shift) 1102 { 1103 u64 
	    page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	/* account for the iova's misalignment within the first page */
	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

/* Largest cache order usable, depending on extended-translation support. */
static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MKEY_CACHE_LAST_STD_ENTRY;
	return MLX5_MAX_UMR_SHIFT;
}

/* Fill the ib_mr fields that are derived from the freshly created mkey. */
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  u64 length, int access_flags, u64 iova)
{
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->ibmr.device = &dev->ib_dev;
	mr->ibmr.iova = iova;
	mr->access_flags = access_flags;
}

/* dmabuf MRs always start out mapped at PAGE_SIZE granularity. */
static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
						  u64 iova)
{
	/*
	 * The alignment of iova has already been checked upon entering
	 * UVERBS_METHOD_REG_DMABUF_MR
	 */
	umem->iova = iova;
	return PAGE_SIZE;
}

/*
 * Allocate an MR backed by the mkey cache when a matching cache entry
 * exists; otherwise fall back to a synchronous, uncached reg_create()
 * under the slow-path mutex.
 */
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
					     struct ib_umem *umem, u64 iova,
					     int access_flags)
{
	struct mlx5r_cache_rb_key rb_key = {
		.access_mode = MLX5_MKC_ACCESS_MODE_MTT,
	};
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	unsigned int page_size;

	if (umem->is_dmabuf)
		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
	else
		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
						     0, iova);
	if (WARN_ON(!page_size))
		return ERR_PTR(-EINVAL);

	rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
	rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
	rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
	ent = mkey_cache_ent_from_rb_key(dev, rb_key);
	/*
	 * If the MR can't come from the cache then synchronously create an uncached
	 * one.
	 */
	if (!ent) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, false);
		mutex_unlock(&dev->slow_path_mutex);
		if (IS_ERR(mr))
			return mr;
		/* remember the key so dereg can return the mkey to the cache */
		mr->mmkey.rb_key = rb_key;
		return mr;
	}

	mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
	if (IS_ERR(mr))
		return mr;

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->page_shift = order_base_2(page_size);
	set_mr_fields(dev, mr, umem->length, access_flags, iova);

	return mr;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	if (!page_size)
		return ERR_PTR(-EINVAL);
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;
	mr->page_shift = order_base_2(page_size);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		/* room for the inline PAS list, rounded to octoword pairs */
		inlen += sizeof(*pas) *
			 roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate) {
		/* ODP MRs are never populated inline */
		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
			err = -EINVAL;
			goto err_2;
		}
		mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
				     pg_cap ?
				     MLX5_IB_MTT_PRESENT : 0);
	}

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	/* an unpopulated mkey is owned by the UMR PD until it is enabled */
	set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
				      populate ? pd : dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, len, umem->length);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(iova, umem->length, mr->page_shift));
	MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
	if (mlx5_umem_needs_ats(dev, umem, access_flags))
		MLX5_SET(mkc, mkc, ma_translation_mode, 1);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(iova, umem->length, mr->page_shift));
	}

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
	mr->umem = umem;
	set_mr_fields(dev, mr, umem->length, access_flags, iova);
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);
err_1:
	kfree(mr);
	return ERR_PTR(err);
}

/*
 * Create an MR over device memory (MEMIC or SW ICM): a plain mkey whose
 * start address points into the device address space, no umem attached.
 */
static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	/* access mode is a 5-bit field split across two mkc fields */
	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET64(mkc, mkc, len, length);
	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	set_mr_fields(dev, mr, length, acc, start_addr);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

/* uverbs ADVISE_MR entry point: only the prefetch advice values are supported. */
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}

/*
 * Register an MR over a device-memory (ib_dm) allocation, translating the
 * DM type into the matching mkc access mode and device address.
 */
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		/* MEMIC addresses are relative to BAR 0 */
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		if (attr->access_flags &
		    ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}

/*
 * Register a normal (non-ODP) user MR over an already-pinned umem.  Uses
 * the mkey cache + UMR when the PAS list can be loaded via UMR, otherwise
 * takes the synchronous reg_create() slow path.  Consumes @umem on error.
 */
static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
				    u64 iova, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool xlt_with_umr;
	int err;

	xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
	if (xlt_with_umr) {
		mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
	} else {
		unsigned int page_size = mlx5_umem_find_best_pgsz(
			umem, mkc, log_page_size, 0, iova);

		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, true);
		mutex_unlock(&dev->slow_path_mutex);
	}
	if (IS_ERR(mr)) {
		ib_umem_release(umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

	if (xlt_with_umr) {
		/*
		 * If the MR was created with reg_create then it will be
		 * configured properly but left disabled. It is safe to go ahead
		 * and configure it again via UMR while enabling it.
		 */
		err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
		if (err) {
			mlx5_ib_dereg_mr(&mr->ibmr, NULL);
			return ERR_PTR(err);
		}
	}
	return &mr->ibmr;
}

/*
 * Register an on-demand-paging user MR.  A (start=0, length=U64_MAX)
 * request creates an implicit ODP MR covering the whole address space;
 * anything else gets an explicit ODP umem plus page-fault initialization.
 */
static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 iova, int access_flags,
					struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	/* make sure the page-fault EQ exists before any ODP MR is created */
	err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
	if (err)
		return ERR_PTR(err);
	if (!start && length == U64_MAX) {
		if (iova != 0)
			return ERR_PTR(-EINVAL);
		if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	/* ODP requires xlt update via umr to work. */
	if (!mlx5r_umr_can_load_pas(dev, length))
		return ERR_PTR(-EINVAL);

	odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
			      &mlx5_mn_ops);
	if (IS_ERR(odp))
		return ERR_CAST(odp);

	mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&odp->umem);
		return ERR_CAST(mr);
	}
	xa_init(&mr->implicit_children);

	odp->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_odp_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}

/*
 * uverbs REG_MR entry point: dispatch to the ODP path for on-demand
 * access, otherwise pin the pages and create a real MR.
 */
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, iova, length, access_flags);

	if (access_flags & IB_ACCESS_ON_DEMAND)
		return create_user_odp_mr(pd, start, length, iova, access_flags,
					  udata);
	umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
	if (IS_ERR(umem))
		return ERR_CAST(umem);
	return create_real_mr(pd, umem, iova, access_flags);
}

/*
 * dma-buf move_notify callback: the exporter is moving the buffer, so zap
 * the mkey's translations and unmap the pages.  Called with the dma-buf
 * reservation lock held (asserted below).
 */
static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
	struct mlx5_ib_mr *mr = umem_dmabuf->private;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	/* nothing mapped yet — nothing to invalidate */
	if (!umem_dmabuf->sgt)
		return;

	mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static struct dma_buf_attach_ops
mlx5_ib_dmabuf_attach_ops = {
	.allow_peer2peer = 1,
	.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};

/*
 * Register an MR over a dma-buf file descriptor.  Follows the ODP-style
 * flow: cacheable mkey, odp_mkeys xarray registration, then an initial
 * page mapping via mlx5_ib_init_dmabuf_mr().
 */
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
	    !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev,
		    "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
		    offset, virt_addr, length, fd, access_flags);

	/* dmabuf requires xlt update via umr to work. */
	if (!mlx5r_umr_can_load_pas(dev, length))
		return ERR_PTR(-EINVAL);

	umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
					 access_flags,
					 &mlx5_ib_dmabuf_attach_ops);
	if (IS_ERR(umem_dmabuf)) {
		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
			    PTR_ERR(umem_dmabuf));
		return ERR_CAST(umem_dmabuf);
	}

	mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
				access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&umem_dmabuf->umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
	umem_dmabuf->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_dmabuf_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}

/*
 * True if the change in access flags can be done via UMR, only some access
 * flags can be updated.
 */
static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
				     unsigned int current_access_flags,
				     unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	/* only local-write/remote-write/remote-read/relaxed-ordering may differ */
	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
		return false;
	return mlx5r_umr_can_reconfig(dev, current_access_flags,
				      target_access_flags);
}

/*
 * True if the MR's PAS list can be replaced in place via UMR for the new
 * umem: the MR must come from the cache and the new mapping must fit in
 * the number of descriptors the cache entry was allocated with.  On
 * success *page_size holds the page size chosen for the new umem.
 */
static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
				  struct ib_umem *new_umem,
				  int new_access_flags, u64 iova,
				  unsigned long *page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);

	/* We only track the allocated sizes of MRs from the cache */
	if (!mr->mmkey.cache_ent)
		return false;
	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
		return false;

	*page_size =
		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
	if (WARN_ON(!*page_size))
		return false;
	return (mr->mmkey.cache_ent->rb_key.ndescs) >=
	       ib_umem_num_dma_blocks(new_umem, *page_size);
}

/*
 * Swap the MR's backing umem for @new_umem via UMR, updating PD/access
 * flags as requested.  Page accounting is moved from the old umem to the
 * new one; the old umem is released on success.
 */
static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			 int access_flags, int flags, struct ib_umem *new_umem,
			 u64 iova, unsigned long page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
	struct ib_umem *old_umem = mr->umem;
	int err;

	/*
	 * To keep everything simple the MR is revoked before we start to mess
	 * with it. This ensures the change is atomic relative to any use of the
	 * MR.
	 */
	err = mlx5r_umr_revoke_mr(mr);
	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		mr->ibmr.pd = pd;
		upd_flags |= MLX5_IB_UPD_XLT_PD;
	}
	if (flags & IB_MR_REREG_ACCESS) {
		mr->access_flags = access_flags;
		upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
	}

	mr->ibmr.iova = iova;
	mr->ibmr.length = new_umem->length;
	mr->page_shift = order_base_2(page_size);
	mr->umem = new_umem;
	err = mlx5r_umr_update_mr_pas(mr, upd_flags);
	if (err) {
		/*
		 * The MR is revoked at this point so there is no issue to free
		 * new_umem.
		 */
		mr->umem = old_umem;
		return err;
	}

	atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
	ib_umem_release(old_umem);
	atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
	return 0;
}

/*
 * uverbs REREG_MR entry point.  Tries progressively cheaper strategies:
 * UMR-only PD/access update, in-place PAS replacement via UMR, re-using
 * the existing umem for a new MR, and finally full re-registration.
 * Returns NULL when the existing ib_mr was updated in place.
 */
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 iova, int new_access_flags,
				    struct ib_pd *new_pd,
				    struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(
		dev,
		"start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		start, iova, length, new_access_flags);

	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
		return ERR_PTR(-EOPNOTSUPP);

	/* fields not being changed keep their current values */
	if (!(flags & IB_MR_REREG_ACCESS))
		new_access_flags = mr->access_flags;
	if (!(flags & IB_MR_REREG_PD))
		new_pd = ib_mr->pd;

	if (!(flags & IB_MR_REREG_TRANS)) {
		struct ib_umem *umem;

		/* Fast path for PD/access change */
		if (can_use_umr_rereg_access(dev, mr->access_flags,
					     new_access_flags)) {
			err = mlx5r_umr_rereg_pd_access(mr, new_pd,
							new_access_flags);
			if (err)
				return ERR_PTR(err);
			return NULL;
		}
		/* DM or ODP MR's don't have a normal umem so we can't re-use it */
		if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
			goto recreate;

		/*
		 * Only one active MR can refer to a umem at one time, revoke
		 * the old MR before assigning the umem to the new one.
		 */
		err = mlx5r_umr_revoke_mr(mr);
		if (err)
			return ERR_PTR(err);
		umem = mr->umem;
		mr->umem = NULL;
		atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

		return create_real_mr(new_pd, umem, mr->ibmr.iova,
				      new_access_flags);
	}

	/*
	 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does
	 * but the logic around releasing the umem is different
	 */
	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
		goto recreate;

	if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
	    can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
		struct ib_umem *new_umem;
		unsigned long page_size;

		new_umem = ib_umem_get(&dev->ib_dev, start, length,
				       new_access_flags);
		if (IS_ERR(new_umem))
			return ERR_CAST(new_umem);

		/* Fast path for PAS change */
		if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
					  &page_size)) {
			err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
					    new_umem, iova, page_size);
			if (err) {
				ib_umem_release(new_umem);
				return ERR_PTR(err);
			}
			return NULL;
		}
		return create_real_mr(new_pd, new_umem, iova, new_access_flags);
	}

	/*
	 * Everything else has no state we can preserve, just create a new MR
	 * from scratch
	 */
recreate:
	return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
				   new_access_flags, udata);
}

/*
 * Allocate and DMA-map the private descriptor buffer (MTTs or KLMs) for a
 * kernel MR, over-allocating so the buffer can be aligned to
 * MLX5_UMR_ALIGN with PTR_ALIGN below.
 */
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	struct mlx5_ib_dev *dev
		= to_mdev(device);
	struct device *ddev = &dev->mdev->pdev->dev;
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	/* slack needed so PTR_ALIGN can reach an MLX5_UMR_ALIGN boundary */
	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
	if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) {
		/* avoid over-allocating past the next kmalloc bucket */
		int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size));

		add_size = min_t(int, end - size, add_size);
	}

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

/* Unmap and free the private descriptor buffer, if one was allocated. */
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (!mr->umem && mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;
		struct mlx5_ib_dev *dev = to_mdev(device);

		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
				 DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

/*
 * Return this MR's mkey to the cache: find (or create) the cache entry
 * matching mr->mmkey.rb_key and push the mkey onto it.  Note the "end"
 * paths are entered with ent->mkeys xa_lock_irq held; it is released
 * after push_mkey_locked().
 */
static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_mr *mr)
{
	struct mlx5_mkey_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int ret;

	if (mr->mmkey.cache_ent) {
		/* mkey came from the cache — just return it to its entry */
		xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
		mr->mmkey.cache_ent->in_use--;
		goto end;
	}

	mutex_lock(&cache->rb_lock);
	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
	if (ent) {
		/* only reuse an entry that matches ndescs exactly */
		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
			if (ent->disabled) {
				mutex_unlock(&cache->rb_lock);
				return -EOPNOTSUPP;
			}
			mr->mmkey.cache_ent = ent;
			xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
			mutex_unlock(&cache->rb_lock);
			goto end;
		}
	}

	ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
	mutex_unlock(&cache->rb_lock);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	mr->mmkey.cache_ent = ent;
	xa_lock_irq(&mr->mmkey.cache_ent->mkeys);

end:
	ret = push_mkey_locked(mr->mmkey.cache_ent, false,
			       xa_mk_value(mr->mmkey.key));
	xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
	return ret;
}

/*
 * Deregister an MR: wait out any ODP users, tear down integrity (PI)
 * state, revoke the mkey (returning it to the cache when possible),
 * release the umem and private descriptors, and free the MR.
 */
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	int rc;

	/*
	 * Any async use of the mr must hold the refcount, once the refcount
	 * goes to zero no other thread, such as ODP page faults, prefetch, any
	 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it.
	 */
	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    refcount_read(&mr->mmkey.usecount) != 0 &&
	    xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
		mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			   mr->sig, NULL, GFP_KERNEL);

		/* the child PI MRs are deregistered recursively */
		if (mr->mtt_mr) {
			rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->mtt_mr = NULL;
		}
		if (mr->klm_mr) {
			rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->klm_mr = NULL;
		}

		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	/* Stop DMA */
	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
		/* revoke + return to cache; on failure destroy the mkey below */
		if (mlx5r_umr_revoke_mr(mr) ||
		    cache_ent_find_and_store(dev, mr))
			mr->mmkey.cache_ent = NULL;

	if (!mr->mmkey.cache_ent) {
		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
		if (rc)
			return rc;
	}

	if (mr->umem) {
		bool is_odp = is_odp_mr(mr);

		if (!is_odp)
			atomic_sub(ib_umem_num_pages(mr->umem),
				   &dev->mdev->priv.reg_pages);
		ib_umem_release(mr->umem);
		if (is_odp)
			mlx5_ib_free_odp_mr(mr);
	}

	if (!mr->mmkey.cache_ent)
		mlx5_free_priv_descs(mr);

	kfree(mr);
	return 0;
}

/*
 * Initialize the mkc of a kernel (UMR-enabled, initially free) mkey used
 * for fast-registration MRs.
 */
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	/* This is only used from the kernel, so setting the PD is OK. */
	set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}

/*
 * Common helper for fast-reg MR allocation: allocate the private
 * descriptor buffer and create the matching free mkey.
 */
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey =
		mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}

/*
 * Allocate a protection-information (PI) child MR used internally by an
 * integrity MR — either KLM- or MTT-based depending on @access_mode.
 */
static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
					      u32 max_num_sg, u32 max_num_meta_sg,
					      int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

/* IB_MR_TYPE_MEM_REG: MTT descriptors at PAGE_SHIFT granularity. */
static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

/* IB_MR_TYPE_SG_GAPS: KLM descriptors, which tolerate gaps in the SG list. */
static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}

/*
 * IB_MR_TYPE_INTEGRITY: create the signature context (two PSVs), the KLM
 * and MTT child PI MRs, and the top-level BSF-enabled KLM mkey, then
 * publish the signature context in dev->sig_mrs.  Unwinds in reverse
 * order on failure.
 */
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			      mr->sig, GFP_KERNEL));
	if (err)
		goto err_free_descs;
	return 0;

err_free_descs:
	destroy_mkey(dev, mr);
	mlx5_free_priv_descs(mr);
err_free_mtt_mr:
	mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);

	return err;
}

/* Common fast-registration MR allocator, dispatching on the MR type. */
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

/* verbs alloc_mr callback (non-integrity types). */
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

/* verbs alloc_mr_integrity callback. */
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}

/*
 * Allocate a memory window: a free, UMR-enabled KLM mkey that userspace
 * later binds to an MR.  Type-2 windows get remote-invalidation enabled.
 */
int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = to_mmw(ibmw);
	unsigned int ndescs;
	u32 *in = NULL;
	void *mkc;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32 comp_mask;
		__u32 response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	/* any trailing udata beyond our struct must be zeroed */
	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return -EOPNOTSUPP;

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	ibmw->rkey = mw->mmkey.key;
	mw->mmkey.ndescs = ndescs;

	resp.response_length =
		min(offsetofend(typeof(resp), response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto free_mkey;
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
		if (err)
			goto free_mkey;
	}

	kfree(in);
	return 0;

free_mkey:
	mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
	kfree(in);
	return err;
}

/* Destroy a memory window, waiting out any concurrent ODP fault users. */
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_dev *dev = to_mdev(mw->device);
	struct mlx5_ib_mw *mmw = to_mmw(mw);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
		/*
		 * pagefault_single_data_segment() may be accessing mmw
		 * if the user bound an ODP MR to this MW.
		 */
		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);

	return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}

/*
 * verbs check_mr_status callback: report (and clear) a pending signature
 * error on an integrity-enabled MR.
 */
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			/* error belongs to a child PI MR — report a generic guard error */
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		/* reporting consumes the error */
		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

/*
 * Map data + metadata for an integrity MR in PA mode: only possible when
 * each SG list has a single entry, so the addresses can be used directly.
 * Returns the number of mapped entries (0 when PA mode is not possible).
 */
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int
n = 0; 2322 2323 mr->meta_length = 0; 2324 if (data_sg_nents == 1) { 2325 n++; 2326 mr->mmkey.ndescs = 1; 2327 if (data_sg_offset) 2328 sg_offset = *data_sg_offset; 2329 mr->data_length = sg_dma_len(data_sg) - sg_offset; 2330 mr->data_iova = sg_dma_address(data_sg) + sg_offset; 2331 if (meta_sg_nents == 1) { 2332 n++; 2333 mr->meta_ndescs = 1; 2334 if (meta_sg_offset) 2335 sg_offset = *meta_sg_offset; 2336 else 2337 sg_offset = 0; 2338 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; 2339 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; 2340 } 2341 ibmr->length = mr->data_length + mr->meta_length; 2342 } 2343 2344 return n; 2345 } 2346 2347 static int 2348 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, 2349 struct scatterlist *sgl, 2350 unsigned short sg_nents, 2351 unsigned int *sg_offset_p, 2352 struct scatterlist *meta_sgl, 2353 unsigned short meta_sg_nents, 2354 unsigned int *meta_sg_offset_p) 2355 { 2356 struct scatterlist *sg = sgl; 2357 struct mlx5_klm *klms = mr->descs; 2358 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; 2359 u32 lkey = mr->ibmr.pd->local_dma_lkey; 2360 int i, j = 0; 2361 2362 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; 2363 mr->ibmr.length = 0; 2364 2365 for_each_sg(sgl, sg, sg_nents, i) { 2366 if (unlikely(i >= mr->max_descs)) 2367 break; 2368 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); 2369 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); 2370 klms[i].key = cpu_to_be32(lkey); 2371 mr->ibmr.length += sg_dma_len(sg) - sg_offset; 2372 2373 sg_offset = 0; 2374 } 2375 2376 if (sg_offset_p) 2377 *sg_offset_p = sg_offset; 2378 2379 mr->mmkey.ndescs = i; 2380 mr->data_length = mr->ibmr.length; 2381 2382 if (meta_sg_nents) { 2383 sg = meta_sgl; 2384 sg_offset = meta_sg_offset_p ? 
*meta_sg_offset_p : 0; 2385 for_each_sg(meta_sgl, sg, meta_sg_nents, j) { 2386 if (unlikely(i + j >= mr->max_descs)) 2387 break; 2388 klms[i + j].va = cpu_to_be64(sg_dma_address(sg) + 2389 sg_offset); 2390 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - 2391 sg_offset); 2392 klms[i + j].key = cpu_to_be32(lkey); 2393 mr->ibmr.length += sg_dma_len(sg) - sg_offset; 2394 2395 sg_offset = 0; 2396 } 2397 if (meta_sg_offset_p) 2398 *meta_sg_offset_p = sg_offset; 2399 2400 mr->meta_ndescs = j; 2401 mr->meta_length = mr->ibmr.length - mr->data_length; 2402 } 2403 2404 return i + j; 2405 } 2406 2407 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) 2408 { 2409 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2410 __be64 *descs; 2411 2412 if (unlikely(mr->mmkey.ndescs == mr->max_descs)) 2413 return -ENOMEM; 2414 2415 descs = mr->descs; 2416 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); 2417 2418 return 0; 2419 } 2420 2421 static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr) 2422 { 2423 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2424 __be64 *descs; 2425 2426 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) 2427 return -ENOMEM; 2428 2429 descs = mr->descs; 2430 descs[mr->mmkey.ndescs + mr->meta_ndescs++] = 2431 cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); 2432 2433 return 0; 2434 } 2435 2436 static int 2437 mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, 2438 int data_sg_nents, unsigned int *data_sg_offset, 2439 struct scatterlist *meta_sg, int meta_sg_nents, 2440 unsigned int *meta_sg_offset) 2441 { 2442 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2443 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; 2444 int n; 2445 2446 pi_mr->mmkey.ndescs = 0; 2447 pi_mr->meta_ndescs = 0; 2448 pi_mr->meta_length = 0; 2449 2450 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, 2451 pi_mr->desc_size * pi_mr->max_descs, 2452 DMA_TO_DEVICE); 2453 2454 pi_mr->ibmr.page_size = ibmr->page_size; 2455 n = ib_sg_to_pages(&pi_mr->ibmr, 
data_sg, data_sg_nents, data_sg_offset, 2456 mlx5_set_page); 2457 if (n != data_sg_nents) 2458 return n; 2459 2460 pi_mr->data_iova = pi_mr->ibmr.iova; 2461 pi_mr->data_length = pi_mr->ibmr.length; 2462 pi_mr->ibmr.length = pi_mr->data_length; 2463 ibmr->length = pi_mr->data_length; 2464 2465 if (meta_sg_nents) { 2466 u64 page_mask = ~((u64)ibmr->page_size - 1); 2467 u64 iova = pi_mr->data_iova; 2468 2469 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, 2470 meta_sg_offset, mlx5_set_page_pi); 2471 2472 pi_mr->meta_length = pi_mr->ibmr.length; 2473 /* 2474 * PI address for the HW is the offset of the metadata address 2475 * relative to the first data page address. 2476 * It equals to first data page address + size of data pages + 2477 * metadata offset at the first metadata page 2478 */ 2479 pi_mr->pi_iova = (iova & page_mask) + 2480 pi_mr->mmkey.ndescs * ibmr->page_size + 2481 (pi_mr->ibmr.iova & ~page_mask); 2482 /* 2483 * In order to use one MTT MR for data and metadata, we register 2484 * also the gaps between the end of the data and the start of 2485 * the metadata (the sig MR will verify that the HW will access 2486 * to right addresses). This mapping is safe because we use 2487 * internal mkey for the registration. 
2488 */ 2489 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; 2490 pi_mr->ibmr.iova = iova; 2491 ibmr->length += pi_mr->meta_length; 2492 } 2493 2494 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, 2495 pi_mr->desc_size * pi_mr->max_descs, 2496 DMA_TO_DEVICE); 2497 2498 return n; 2499 } 2500 2501 static int 2502 mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, 2503 int data_sg_nents, unsigned int *data_sg_offset, 2504 struct scatterlist *meta_sg, int meta_sg_nents, 2505 unsigned int *meta_sg_offset) 2506 { 2507 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2508 struct mlx5_ib_mr *pi_mr = mr->klm_mr; 2509 int n; 2510 2511 pi_mr->mmkey.ndescs = 0; 2512 pi_mr->meta_ndescs = 0; 2513 pi_mr->meta_length = 0; 2514 2515 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, 2516 pi_mr->desc_size * pi_mr->max_descs, 2517 DMA_TO_DEVICE); 2518 2519 n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset, 2520 meta_sg, meta_sg_nents, meta_sg_offset); 2521 2522 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, 2523 pi_mr->desc_size * pi_mr->max_descs, 2524 DMA_TO_DEVICE); 2525 2526 /* This is zero-based memory region */ 2527 pi_mr->data_iova = 0; 2528 pi_mr->ibmr.iova = 0; 2529 pi_mr->pi_iova = pi_mr->data_length; 2530 ibmr->length = pi_mr->ibmr.length; 2531 2532 return n; 2533 } 2534 2535 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, 2536 int data_sg_nents, unsigned int *data_sg_offset, 2537 struct scatterlist *meta_sg, int meta_sg_nents, 2538 unsigned int *meta_sg_offset) 2539 { 2540 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2541 struct mlx5_ib_mr *pi_mr = NULL; 2542 int n; 2543 2544 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); 2545 2546 mr->mmkey.ndescs = 0; 2547 mr->data_length = 0; 2548 mr->data_iova = 0; 2549 mr->meta_ndescs = 0; 2550 mr->pi_iova = 0; 2551 /* 2552 * As a performance optimization, if possible, there is no need to 2553 * perform UMR operation to register the 
data/metadata buffers. 2554 * First try to map the sg lists to PA descriptors with local_dma_lkey. 2555 * Fallback to UMR only in case of a failure. 2556 */ 2557 n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents, 2558 data_sg_offset, meta_sg, meta_sg_nents, 2559 meta_sg_offset); 2560 if (n == data_sg_nents + meta_sg_nents) 2561 goto out; 2562 /* 2563 * As a performance optimization, if possible, there is no need to map 2564 * the sg lists to KLM descriptors. First try to map the sg lists to MTT 2565 * descriptors and fallback to KLM only in case of a failure. 2566 * It's more efficient for the HW to work with MTT descriptors 2567 * (especially in high load). 2568 * Use KLM (indirect access) only if it's mandatory. 2569 */ 2570 pi_mr = mr->mtt_mr; 2571 n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents, 2572 data_sg_offset, meta_sg, meta_sg_nents, 2573 meta_sg_offset); 2574 if (n == data_sg_nents + meta_sg_nents) 2575 goto out; 2576 2577 pi_mr = mr->klm_mr; 2578 n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents, 2579 data_sg_offset, meta_sg, meta_sg_nents, 2580 meta_sg_offset); 2581 if (unlikely(n != data_sg_nents + meta_sg_nents)) 2582 return -ENOMEM; 2583 2584 out: 2585 /* This is zero-based memory region */ 2586 ibmr->iova = 0; 2587 mr->pi_mr = pi_mr; 2588 if (pi_mr) 2589 ibmr->sig_attrs->meta_length = pi_mr->meta_length; 2590 else 2591 ibmr->sig_attrs->meta_length = mr->meta_length; 2592 2593 return 0; 2594 } 2595 2596 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, 2597 unsigned int *sg_offset) 2598 { 2599 struct mlx5_ib_mr *mr = to_mmr(ibmr); 2600 int n; 2601 2602 mr->mmkey.ndescs = 0; 2603 2604 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, 2605 mr->desc_size * mr->max_descs, 2606 DMA_TO_DEVICE); 2607 2608 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) 2609 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, 2610 NULL); 2611 else 2612 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 
2613 mlx5_set_page); 2614 2615 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, 2616 mr->desc_size * mr->max_descs, 2617 DMA_TO_DEVICE); 2618 2619 return n; 2620 } 2621