1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 34 #include <linux/kref.h> 35 #include <linux/random.h> 36 #include <linux/debugfs.h> 37 #include <linux/export.h> 38 #include <linux/delay.h> 39 #include <rdma/ib_umem.h> 40 #include <rdma/ib_umem_odp.h> 41 #include <rdma/ib_verbs.h> 42 #include "mlx5_ib.h" 43 44 enum { 45 MAX_PENDING_REG_MR = 8, 46 }; 47 48 #define MLX5_UMR_ALIGN 2048 49 50 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 51 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 52 static int mr_cache_max_order(struct mlx5_ib_dev *dev); 53 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 54 static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev) 55 { 56 return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled); 57 } 58 59 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) 60 { 61 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); 62 } 63 64 static bool use_umr(struct mlx5_ib_dev *dev, int order) 65 { 66 return order <= mr_cache_max_order(dev) && 67 umr_can_modify_entity_size(dev); 68 } 69 70 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 71 { 72 int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); 73 74 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 75 /* Wait until all page fault handlers using the mr complete. */ 76 synchronize_srcu(&dev->mr_srcu); 77 #endif 78 79 return err; 80 } 81 82 static int order2idx(struct mlx5_ib_dev *dev, int order) 83 { 84 struct mlx5_mr_cache *cache = &dev->cache; 85 86 if (order < cache->ent[0].order) 87 return 0; 88 else 89 return order - cache->ent[0].order; 90 } 91 92 static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length) 93 { 94 return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >= 95 length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1)); 96 } 97 98 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 99 static void update_odp_mr(struct mlx5_ib_mr *mr) 100 { 101 if (mr->umem->odp_data) { 102 /* 103 * This barrier prevents the compiler from moving the 104 * setting of umem->odp_data->private to point to our 105 * MR, before reg_umr finished, to ensure that the MR 106 * initialization have finished before starting to 107 * handle invalidations. 108 */ 109 smp_wmb(); 110 mr->umem->odp_data->private = mr; 111 /* 112 * Make sure we will see the new 113 * umem->odp_data->private value in the invalidation 114 * routines, before we can get page faults on the 115 * MR. Page faults can happen once we put the MR in 116 * the tree, below this line. Without the barrier, 117 * there can be a fault handling and an invalidation 118 * before umem->odp_data->private == mr is visible to 119 * the invalidation handler. 120 */ 121 smp_wmb(); 122 } 123 } 124 #endif 125 126 static void reg_mr_callback(int status, void *context) 127 { 128 struct mlx5_ib_mr *mr = context; 129 struct mlx5_ib_dev *dev = mr->dev; 130 struct mlx5_mr_cache *cache = &dev->cache; 131 int c = order2idx(dev, mr->order); 132 struct mlx5_cache_ent *ent = &cache->ent[c]; 133 u8 key; 134 unsigned long flags; 135 struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table; 136 int err; 137 138 spin_lock_irqsave(&ent->lock, flags); 139 ent->pending--; 140 spin_unlock_irqrestore(&ent->lock, flags); 141 if (status) { 142 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); 143 kfree(mr); 144 dev->fill_delay = 1; 145 mod_timer(&dev->delay_timer, jiffies + HZ); 146 return; 147 } 148 149 mr->mmkey.type = MLX5_MKEY_MR; 150 spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); 151 key = dev->mdev->priv.mkey_key++; 152 spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); 153 mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key; 154 155 cache->last_add = jiffies; 156 157 spin_lock_irqsave(&ent->lock, flags); 158 list_add_tail(&mr->list, &ent->head); 159 ent->cur++; 160 ent->size++; 161 spin_unlock_irqrestore(&ent->lock, flags); 162 163 write_lock_irqsave(&table->lock, flags); 164 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key), 165 &mr->mmkey); 166 if (err) 167 pr_err("Error inserting to mkey tree. 0x%x\n", -err); 168 write_unlock_irqrestore(&table->lock, flags); 169 170 if (!completion_done(&ent->compl)) 171 complete(&ent->compl); 172 } 173 174 static int add_keys(struct mlx5_ib_dev *dev, int c, int num) 175 { 176 struct mlx5_mr_cache *cache = &dev->cache; 177 struct mlx5_cache_ent *ent = &cache->ent[c]; 178 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 179 struct mlx5_ib_mr *mr; 180 void *mkc; 181 u32 *in; 182 int err = 0; 183 int i; 184 185 in = kzalloc(inlen, GFP_KERNEL); 186 if (!in) 187 return -ENOMEM; 188 189 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 190 for (i = 0; i < num; i++) { 191 if (ent->pending >= MAX_PENDING_REG_MR) { 192 err = -EAGAIN; 193 break; 194 } 195 196 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 197 if (!mr) { 198 err = -ENOMEM; 199 break; 200 } 201 mr->order = ent->order; 202 mr->allocated_from_cache = 1; 203 mr->dev = dev; 204 205 MLX5_SET(mkc, mkc, free, 1); 206 MLX5_SET(mkc, mkc, umr_en, 1); 207 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); 208 MLX5_SET(mkc, mkc, access_mode_4_2, 209 (ent->access_mode >> 2) & 0x7); 210 211 MLX5_SET(mkc, mkc, qpn, 0xffffff); 212 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); 213 MLX5_SET(mkc, mkc, log_page_size, ent->page); 214 215 spin_lock_irq(&ent->lock); 216 ent->pending++; 217 spin_unlock_irq(&ent->lock); 218 err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey, 219 in, inlen, 220 mr->out, sizeof(mr->out), 221 reg_mr_callback, mr); 222 if (err) { 223 spin_lock_irq(&ent->lock); 224 ent->pending--; 225 spin_unlock_irq(&ent->lock); 226 mlx5_ib_warn(dev, "create mkey failed %d\n", err); 227 kfree(mr); 228 break; 229 } 230 } 231 232 kfree(in); 233 return err; 234 } 235 236 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) 237 { 238 struct mlx5_mr_cache *cache = &dev->cache; 239 struct mlx5_cache_ent *ent = &cache->ent[c]; 240 struct mlx5_ib_mr *tmp_mr; 241 struct mlx5_ib_mr *mr; 242 LIST_HEAD(del_list); 243 int i; 244 245 for (i = 0; i < num; i++) { 246 spin_lock_irq(&ent->lock); 247 if (list_empty(&ent->head)) { 248 spin_unlock_irq(&ent->lock); 249 break; 250 } 251 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); 252 list_move(&mr->list, &del_list); 253 ent->cur--; 254 ent->size--; 255 spin_unlock_irq(&ent->lock); 256 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); 257 } 258 259 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 260 synchronize_srcu(&dev->mr_srcu); 261 #endif 262 263 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { 264 list_del(&mr->list); 265 kfree(mr); 266 } 267 } 268 269 static ssize_t size_write(struct file *filp, const char __user *buf, 270 size_t count, loff_t *pos) 271 { 272 struct mlx5_cache_ent *ent = filp->private_data; 273 struct mlx5_ib_dev *dev = ent->dev; 274 char lbuf[20]; 275 u32 var; 276 int err; 277 int c; 278 279 if (copy_from_user(lbuf, buf, sizeof(lbuf))) 280 return -EFAULT; 281 282 c = order2idx(dev, ent->order); 283 lbuf[sizeof(lbuf) - 1] = 0; 284 285 if (sscanf(lbuf, "%u", &var) != 1) 286 return -EINVAL; 287 288 if (var < ent->limit) 289 return -EINVAL; 290 291 if (var > ent->size) { 292 do { 293 err = add_keys(dev, c, var - ent->size); 294 if (err && err != -EAGAIN) 295 return err; 296 297 usleep_range(3000, 5000); 298 } while (err); 299 } else if (var < ent->size) { 300 remove_keys(dev, c, ent->size - var); 301 } 302 303 return count; 304 } 305 306 static ssize_t size_read(struct file *filp, char __user *buf, size_t count, 307 loff_t *pos) 308 { 309 struct mlx5_cache_ent *ent = filp->private_data; 310 char lbuf[20]; 311 int err; 312 313 if (*pos) 314 return 0; 315 316 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); 317 if (err < 0) 318 return err; 319 320 if (copy_to_user(buf, lbuf, err)) 321 return -EFAULT; 322 323 *pos += err; 324 325 return err; 326 } 327 328 static const struct file_operations size_fops = { 329 .owner = THIS_MODULE, 330 .open = simple_open, 331 .write = size_write, 332 .read = size_read, 333 }; 334 335 static ssize_t limit_write(struct file *filp, const char __user *buf, 336 size_t count, loff_t *pos) 337 { 338 struct mlx5_cache_ent *ent = filp->private_data; 339 struct mlx5_ib_dev *dev = ent->dev; 340 char lbuf[20]; 341 u32 var; 342 int err; 343 int c; 344 345 if (copy_from_user(lbuf, buf, sizeof(lbuf))) 346 return -EFAULT; 347 348 c = order2idx(dev, ent->order); 349 lbuf[sizeof(lbuf) - 1] = 0; 350 351 if (sscanf(lbuf, "%u", &var) != 1) 352 return -EINVAL; 353 354 if (var > ent->size) 355 return -EINVAL; 356 357 ent->limit = var; 358 359 if (ent->cur < ent->limit) { 360 err = add_keys(dev, c, 2 * ent->limit - ent->cur); 361 if (err) 362 return err; 363 } 364 365 return count; 366 } 367 368 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, 369 loff_t *pos) 370 { 371 struct mlx5_cache_ent *ent = filp->private_data; 372 char lbuf[20]; 373 int err; 374 375 if (*pos) 376 return 0; 377 378 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); 379 if (err < 0) 380 return err; 381 382 if (copy_to_user(buf, lbuf, err)) 383 return -EFAULT; 384 385 *pos += err; 386 387 return err; 388 } 389 390 static const struct file_operations limit_fops = { 391 .owner = THIS_MODULE, 392 .open = simple_open, 393 .write = limit_write, 394 .read = limit_read, 395 }; 396 397 static int someone_adding(struct mlx5_mr_cache *cache) 398 { 399 int i; 400 401 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 402 if (cache->ent[i].cur < cache->ent[i].limit) 403 return 1; 404 } 405 406 return 0; 407 } 408 409 static void __cache_work_func(struct mlx5_cache_ent *ent) 410 { 411 struct mlx5_ib_dev *dev = ent->dev; 412 struct mlx5_mr_cache *cache = &dev->cache; 413 int i = order2idx(dev, ent->order); 414 int err; 415 416 if (cache->stopped) 417 return; 418 419 ent = &dev->cache.ent[i]; 420 if (ent->cur < 2 * ent->limit && !dev->fill_delay) { 421 err = add_keys(dev, i, 1); 422 if (ent->cur < 2 * ent->limit) { 423 if (err == -EAGAIN) { 424 mlx5_ib_dbg(dev, "returned eagain, order %d\n", 425 i + 2); 426 queue_delayed_work(cache->wq, &ent->dwork, 427 msecs_to_jiffies(3)); 428 } else if (err) { 429 mlx5_ib_warn(dev, "command failed order %d, err %d\n", 430 i + 2, err); 431 queue_delayed_work(cache->wq, &ent->dwork, 432 msecs_to_jiffies(1000)); 433 } else { 434 queue_work(cache->wq, &ent->work); 435 } 436 } 437 } else if (ent->cur > 2 * ent->limit) { 438 /* 439 * The remove_keys() logic is performed as garbage collection 440 * task. Such task is intended to be run when no other active 441 * processes are running. 442 * 443 * The need_resched() will return TRUE if there are user tasks 444 * to be activated in near future. 445 * 446 * In such case, we don't execute remove_keys() and postpone 447 * the garbage collection work to try to run in next cycle, 448 * in order to free CPU resources to other tasks. 449 */ 450 if (!need_resched() && !someone_adding(cache) && 451 time_after(jiffies, cache->last_add + 300 * HZ)) { 452 remove_keys(dev, i, 1); 453 if (ent->cur > ent->limit) 454 queue_work(cache->wq, &ent->work); 455 } else { 456 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); 457 } 458 } 459 } 460 461 static void delayed_cache_work_func(struct work_struct *work) 462 { 463 struct mlx5_cache_ent *ent; 464 465 ent = container_of(work, struct mlx5_cache_ent, dwork.work); 466 __cache_work_func(ent); 467 } 468 469 static void cache_work_func(struct work_struct *work) 470 { 471 struct mlx5_cache_ent *ent; 472 473 ent = container_of(work, struct mlx5_cache_ent, work); 474 __cache_work_func(ent); 475 } 476 477 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) 478 { 479 struct mlx5_mr_cache *cache = &dev->cache; 480 struct mlx5_cache_ent *ent; 481 struct mlx5_ib_mr *mr; 482 int err; 483 484 if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) { 485 mlx5_ib_err(dev, "cache entry %d is out of range\n", entry); 486 return NULL; 487 } 488 489 ent = &cache->ent[entry]; 490 while (1) { 491 spin_lock_irq(&ent->lock); 492 if (list_empty(&ent->head)) { 493 spin_unlock_irq(&ent->lock); 494 495 err = add_keys(dev, entry, 1); 496 if (err && err != -EAGAIN) 497 return ERR_PTR(err); 498 499 wait_for_completion(&ent->compl); 500 } else { 501 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, 502 list); 503 list_del(&mr->list); 504 ent->cur--; 505 spin_unlock_irq(&ent->lock); 506 if (ent->cur < ent->limit) 507 queue_work(cache->wq, &ent->work); 508 return mr; 509 } 510 } 511 } 512 513 static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) 514 { 515 struct mlx5_mr_cache *cache = &dev->cache; 516 struct mlx5_ib_mr *mr = NULL; 517 struct mlx5_cache_ent *ent; 518 int last_umr_cache_entry; 519 int c; 520 int i; 521 522 c = order2idx(dev, order); 523 last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev)); 524 if (c < 0 || c > last_umr_cache_entry) { 525 mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); 526 return NULL; 527 } 528 529 for (i = c; i <= last_umr_cache_entry; i++) { 530 ent = &cache->ent[i]; 531 532 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); 533 534 spin_lock_irq(&ent->lock); 535 if (!list_empty(&ent->head)) { 536 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, 537 list); 538 list_del(&mr->list); 539 ent->cur--; 540 spin_unlock_irq(&ent->lock); 541 if (ent->cur < ent->limit) 542 queue_work(cache->wq, &ent->work); 543 break; 544 } 545 spin_unlock_irq(&ent->lock); 546 547 queue_work(cache->wq, &ent->work); 548 } 549 550 if (!mr) 551 cache->ent[c].miss++; 552 553 return mr; 554 } 555 556 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 557 { 558 struct mlx5_mr_cache *cache = &dev->cache; 559 struct mlx5_cache_ent *ent; 560 int shrink = 0; 561 int c; 562 563 c = order2idx(dev, mr->order); 564 if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { 565 mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); 566 return; 567 } 568 569 if (unreg_umr(dev, mr)) 570 return; 571 572 ent = &cache->ent[c]; 573 spin_lock_irq(&ent->lock); 574 list_add_tail(&mr->list, &ent->head); 575 ent->cur++; 576 if (ent->cur > 2 * ent->limit) 577 shrink = 1; 578 spin_unlock_irq(&ent->lock); 579 580 if (shrink) 581 queue_work(cache->wq, &ent->work); 582 } 583 584 static void clean_keys(struct mlx5_ib_dev *dev, int c) 585 { 586 struct mlx5_mr_cache *cache = &dev->cache; 587 struct mlx5_cache_ent *ent = &cache->ent[c]; 588 struct mlx5_ib_mr *tmp_mr; 589 struct mlx5_ib_mr *mr; 590 LIST_HEAD(del_list); 591 592 cancel_delayed_work(&ent->dwork); 593 while (1) { 594 spin_lock_irq(&ent->lock); 595 if (list_empty(&ent->head)) { 596 spin_unlock_irq(&ent->lock); 597 break; 598 } 599 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); 600 list_move(&mr->list, &del_list); 601 ent->cur--; 602 ent->size--; 603 spin_unlock_irq(&ent->lock); 604 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); 605 } 606 607 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 608 synchronize_srcu(&dev->mr_srcu); 609 #endif 610 611 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { 612 list_del(&mr->list); 613 kfree(mr); 614 } 615 } 616 617 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) 618 { 619 if (!mlx5_debugfs_root || dev->rep) 620 return; 621 622 debugfs_remove_recursive(dev->cache.root); 623 dev->cache.root = NULL; 624 } 625 626 static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) 627 { 628 struct mlx5_mr_cache *cache = &dev->cache; 629 struct mlx5_cache_ent *ent; 630 int i; 631 632 if (!mlx5_debugfs_root || dev->rep) 633 return 0; 634 635 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); 636 if (!cache->root) 637 return -ENOMEM; 638 639 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 640 ent = &cache->ent[i]; 641 sprintf(ent->name, "%d", ent->order); 642 ent->dir = debugfs_create_dir(ent->name, cache->root); 643 if (!ent->dir) 644 goto err; 645 646 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, 647 &size_fops); 648 if (!ent->fsize) 649 goto err; 650 651 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, 652 &limit_fops); 653 if (!ent->flimit) 654 goto err; 655 656 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, 657 &ent->cur); 658 if (!ent->fcur) 659 goto err; 660 661 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, 662 &ent->miss); 663 if (!ent->fmiss) 664 goto err; 665 } 666 667 return 0; 668 err: 669 mlx5_mr_cache_debugfs_cleanup(dev); 670 671 return -ENOMEM; 672 } 673 674 static void delay_time_func(struct timer_list *t) 675 { 676 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer); 677 678 dev->fill_delay = 0; 679 } 680 681 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) 682 { 683 struct mlx5_mr_cache *cache = &dev->cache; 684 struct mlx5_cache_ent *ent; 685 int err; 686 int i; 687 688 mutex_init(&dev->slow_path_mutex); 689 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 690 if (!cache->wq) { 691 mlx5_ib_warn(dev, "failed to create work queue\n"); 692 return -ENOMEM; 693 } 694 695 timer_setup(&dev->delay_timer, delay_time_func, 0); 696 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 697 ent = &cache->ent[i]; 698 INIT_LIST_HEAD(&ent->head); 699 spin_lock_init(&ent->lock); 700 ent->order = i + 2; 701 ent->dev = dev; 702 ent->limit = 0; 703 704 init_completion(&ent->compl); 705 INIT_WORK(&ent->work, cache_work_func); 706 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); 707 queue_work(cache->wq, &ent->work); 708 709 if (i > MR_CACHE_LAST_STD_ENTRY) { 710 mlx5_odp_init_mr_cache_entry(ent); 711 continue; 712 } 713 714 if (ent->order > mr_cache_max_order(dev)) 715 continue; 716 717 ent->page = PAGE_SHIFT; 718 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / 719 MLX5_IB_UMR_OCTOWORD; 720 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; 721 if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && 722 !dev->rep && 723 mlx5_core_is_pf(dev->mdev)) 724 ent->limit = dev->mdev->profile->mr_cache[i].limit; 725 else 726 ent->limit = 0; 727 } 728 729 err = mlx5_mr_cache_debugfs_init(dev); 730 if (err) 731 mlx5_ib_warn(dev, "cache debugfs failure\n"); 732 733 /* 734 * We don't want to fail driver if debugfs failed to initialize, 735 * so we are not forwarding error to the user. 736 */ 737 738 return 0; 739 } 740 741 static void wait_for_async_commands(struct mlx5_ib_dev *dev) 742 { 743 struct mlx5_mr_cache *cache = &dev->cache; 744 struct mlx5_cache_ent *ent; 745 int total = 0; 746 int i; 747 int j; 748 749 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 750 ent = &cache->ent[i]; 751 for (j = 0 ; j < 1000; j++) { 752 if (!ent->pending) 753 break; 754 msleep(50); 755 } 756 } 757 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 758 ent = &cache->ent[i]; 759 total += ent->pending; 760 } 761 762 if (total) 763 mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total); 764 else 765 mlx5_ib_warn(dev, "done with all pending requests\n"); 766 } 767 768 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) 769 { 770 int i; 771 772 if (!dev->cache.wq) 773 return 0; 774 775 dev->cache.stopped = 1; 776 flush_workqueue(dev->cache.wq); 777 778 mlx5_mr_cache_debugfs_cleanup(dev); 779 780 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) 781 clean_keys(dev, i); 782 783 destroy_workqueue(dev->cache.wq); 784 wait_for_async_commands(dev); 785 del_timer_sync(&dev->delay_timer); 786 787 return 0; 788 } 789 790 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) 791 { 792 struct mlx5_ib_dev *dev = to_mdev(pd->device); 793 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 794 struct mlx5_core_dev *mdev = dev->mdev; 795 struct mlx5_ib_mr *mr; 796 void *mkc; 797 u32 *in; 798 int err; 799 800 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 801 if (!mr) 802 return ERR_PTR(-ENOMEM); 803 804 in = kzalloc(inlen, GFP_KERNEL); 805 if (!in) { 806 err = -ENOMEM; 807 goto err_free; 808 } 809 810 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 811 812 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); 813 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); 814 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); 815 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); 816 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); 817 MLX5_SET(mkc, mkc, lr, 1); 818 819 MLX5_SET(mkc, mkc, length64, 1); 820 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 821 MLX5_SET(mkc, mkc, qpn, 0xffffff); 822 MLX5_SET64(mkc, mkc, start_addr, 0); 823 824 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); 825 if (err) 826 goto err_in; 827 828 kfree(in); 829 mr->mmkey.type = MLX5_MKEY_MR; 830 mr->ibmr.lkey = mr->mmkey.key; 831 mr->ibmr.rkey = mr->mmkey.key; 832 mr->umem = NULL; 833 834 return &mr->ibmr; 835 836 err_in: 837 kfree(in); 838 839 err_free: 840 kfree(mr); 841 842 return ERR_PTR(err); 843 } 844 845 static int get_octo_len(u64 addr, u64 len, int page_shift) 846 { 847 u64 page_size = 1ULL << page_shift; 848 u64 offset; 849 int npages; 850 851 offset = addr & (page_size - 1); 852 npages = ALIGN(len + offset, page_size) >> page_shift; 853 return (npages + 1) / 2; 854 } 855 856 static int mr_cache_max_order(struct mlx5_ib_dev *dev) 857 { 858 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 859 return MR_CACHE_LAST_STD_ENTRY + 2; 860 return MLX5_MAX_UMR_SHIFT; 861 } 862 863 static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, 864 int access_flags, struct ib_umem **umem, 865 int *npages, int *page_shift, int *ncont, 866 int *order) 867 { 868 struct mlx5_ib_dev *dev = to_mdev(pd->device); 869 int err; 870 871 *umem = ib_umem_get(pd->uobject->context, start, length, 872 access_flags, 0); 873 err = PTR_ERR_OR_ZERO(*umem); 874 if (err) { 875 *umem = NULL; 876 mlx5_ib_err(dev, "umem get failed (%d)\n", err); 877 return err; 878 } 879 880 mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, 881 page_shift, ncont, order); 882 if (!*npages) { 883 mlx5_ib_warn(dev, "avoid zero region\n"); 884 ib_umem_release(*umem); 885 return -EINVAL; 886 } 887 888 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", 889 *npages, *ncont, *order, *page_shift); 890 891 return 0; 892 } 893 894 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) 895 { 896 struct mlx5_ib_umr_context *context = 897 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); 898 899 context->status = wc->status; 900 complete(&context->done); 901 } 902 903 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) 904 { 905 context->cqe.done = mlx5_ib_umr_done; 906 context->status = -1; 907 init_completion(&context->done); 908 } 909 910 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, 911 struct mlx5_umr_wr *umrwr) 912 { 913 struct umr_common *umrc = &dev->umrc; 914 struct ib_send_wr *bad; 915 int err; 916 struct mlx5_ib_umr_context umr_context; 917 918 mlx5_ib_init_umr_context(&umr_context); 919 umrwr->wr.wr_cqe = &umr_context.cqe; 920 921 down(&umrc->sem); 922 err = ib_post_send(umrc->qp, &umrwr->wr, &bad); 923 if (err) { 924 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); 925 } else { 926 wait_for_completion(&umr_context.done); 927 if (umr_context.status != IB_WC_SUCCESS) { 928 mlx5_ib_warn(dev, "reg umr failed (%u)\n", 929 umr_context.status); 930 err = -EFAULT; 931 } 932 } 933 up(&umrc->sem); 934 return err; 935 } 936 937 static struct mlx5_ib_mr *alloc_mr_from_cache( 938 struct ib_pd *pd, struct ib_umem *umem, 939 u64 virt_addr, u64 len, int npages, 940 int page_shift, int order, int access_flags) 941 { 942 struct mlx5_ib_dev *dev = to_mdev(pd->device); 943 struct mlx5_ib_mr *mr; 944 int err = 0; 945 int i; 946 947 for (i = 0; i < 1; i++) { 948 mr = alloc_cached_mr(dev, order); 949 if (mr) 950 break; 951 952 err = add_keys(dev, order2idx(dev, order), 1); 953 if (err && err != -EAGAIN) { 954 mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); 955 break; 956 } 957 } 958 959 if (!mr) 960 return ERR_PTR(-EAGAIN); 961 962 mr->ibmr.pd = pd; 963 mr->umem = umem; 964 mr->access_flags = access_flags; 965 mr->desc_size = sizeof(struct mlx5_mtt); 966 mr->mmkey.iova = virt_addr; 967 mr->mmkey.size = len; 968 mr->mmkey.pd = to_mpd(pd)->pdn; 969 970 return mr; 971 } 972 973 static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages, 974 void *xlt, int page_shift, size_t size, 975 int flags) 976 { 977 struct mlx5_ib_dev *dev = mr->dev; 978 struct ib_umem *umem = mr->umem; 979 980 if (flags & MLX5_IB_UPD_XLT_INDIRECT) { 981 if (!umr_can_use_indirect_mkey(dev)) 982 return -EPERM; 983 mlx5_odp_populate_klm(xlt, idx, npages, mr, flags); 984 return npages; 985 } 986 987 npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx); 988 989 if (!(flags & MLX5_IB_UPD_XLT_ZAP)) { 990 __mlx5_ib_populate_pas(dev, umem, page_shift, 991 idx, npages, xlt, 992 MLX5_IB_MTT_PRESENT); 993 /* Clear padding after the pages 994 * brought from the umem. 995 */ 996 memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0, 997 size - npages * sizeof(struct mlx5_mtt)); 998 } 999 1000 return npages; 1001 } 1002 1003 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \ 1004 MLX5_UMR_MTT_ALIGNMENT) 1005 #define MLX5_SPARE_UMR_CHUNK 0x10000 1006 1007 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, 1008 int page_shift, int flags) 1009 { 1010 struct mlx5_ib_dev *dev = mr->dev; 1011 struct device *ddev = dev->ib_dev.dev.parent; 1012 int size; 1013 void *xlt; 1014 dma_addr_t dma; 1015 struct mlx5_umr_wr wr; 1016 struct ib_sge sg; 1017 int err = 0; 1018 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT) 1019 ? sizeof(struct mlx5_klm) 1020 : sizeof(struct mlx5_mtt); 1021 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size; 1022 const int page_mask = page_align - 1; 1023 size_t pages_mapped = 0; 1024 size_t pages_to_map = 0; 1025 size_t pages_iter = 0; 1026 gfp_t gfp; 1027 bool use_emergency_page = false; 1028 1029 if ((flags & MLX5_IB_UPD_XLT_INDIRECT) && 1030 !umr_can_use_indirect_mkey(dev)) 1031 return -EPERM; 1032 1033 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, 1034 * so we need to align the offset and length accordingly 1035 */ 1036 if (idx & page_mask) { 1037 npages += idx & page_mask; 1038 idx &= ~page_mask; 1039 } 1040 1041 gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL; 1042 gfp |= __GFP_ZERO | __GFP_NOWARN; 1043 1044 pages_to_map = ALIGN(npages, page_align); 1045 size = desc_size * pages_to_map; 1046 size = min_t(int, size, MLX5_MAX_UMR_CHUNK); 1047 1048 xlt = (void *)__get_free_pages(gfp, get_order(size)); 1049 if (!xlt && size > MLX5_SPARE_UMR_CHUNK) { 1050 mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n", 1051 size, get_order(size), MLX5_SPARE_UMR_CHUNK); 1052 1053 size = MLX5_SPARE_UMR_CHUNK; 1054 xlt = (void *)__get_free_pages(gfp, get_order(size)); 1055 } 1056 1057 if (!xlt) { 1058 mlx5_ib_warn(dev, "Using XLT emergency buffer\n"); 1059 xlt = (void *)mlx5_ib_get_xlt_emergency_page(); 1060 size = PAGE_SIZE; 1061 memset(xlt, 0, size); 1062 use_emergency_page = true; 1063 } 1064 pages_iter = size / desc_size; 1065 dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE); 1066 if (dma_mapping_error(ddev, dma)) { 1067 mlx5_ib_err(dev, "unable to map DMA during XLT update.\n"); 1068 err = -ENOMEM; 1069 goto free_xlt; 1070 } 1071 1072 sg.addr = dma; 1073 sg.lkey = dev->umrc.pd->local_dma_lkey; 1074 1075 memset(&wr, 0, sizeof(wr)); 1076 wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT; 1077 if (!(flags & MLX5_IB_UPD_XLT_ENABLE)) 1078 wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE; 1079 wr.wr.sg_list = &sg; 1080 wr.wr.num_sge = 1; 1081 wr.wr.opcode = MLX5_IB_WR_UMR; 1082 1083 wr.pd = mr->ibmr.pd; 1084 wr.mkey = mr->mmkey.key; 1085 wr.length = mr->mmkey.size; 1086 wr.virt_addr = mr->mmkey.iova; 1087 wr.access_flags = mr->access_flags; 1088 wr.page_shift = page_shift; 1089 1090 for (pages_mapped = 0; 1091 pages_mapped < pages_to_map && !err; 1092 pages_mapped += pages_iter, idx += pages_iter) { 1093 npages = min_t(int, pages_iter, pages_to_map - pages_mapped); 1094 dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE); 1095 npages = populate_xlt(mr, idx, npages, xlt, 1096 page_shift, size, flags); 1097 1098 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); 1099 1100 sg.length = ALIGN(npages * desc_size, 1101 MLX5_UMR_MTT_ALIGNMENT); 1102 1103 if (pages_mapped + pages_iter >= pages_to_map) { 1104 if (flags & MLX5_IB_UPD_XLT_ENABLE) 1105 wr.wr.send_flags |= 1106 MLX5_IB_SEND_UMR_ENABLE_MR | 1107 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS | 1108 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; 1109 if (flags & MLX5_IB_UPD_XLT_PD || 1110 flags & MLX5_IB_UPD_XLT_ACCESS) 1111 wr.wr.send_flags |= 1112 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; 1113 if (flags & MLX5_IB_UPD_XLT_ADDR) 1114 wr.wr.send_flags |= 1115 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; 1116 } 1117 1118 wr.offset = idx * desc_size; 1119 wr.xlt_size = sg.length; 1120 1121 err = mlx5_ib_post_send_wait(dev, &wr); 1122 } 1123 dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); 1124 1125 free_xlt: 1126 if (use_emergency_page) 1127 mlx5_ib_put_xlt_emergency_page(); 1128 else 1129 free_pages((unsigned long)xlt, get_order(size)); 1130 1131 return err; 1132 } 1133 1134 /* 1135 * If ibmr is NULL it will be allocated by reg_create. 1136 * Else, the given ibmr will be used. 1137 */ 1138 static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, 1139 u64 virt_addr, u64 length, 1140 struct ib_umem *umem, int npages, 1141 int page_shift, int access_flags, 1142 bool populate) 1143 { 1144 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1145 struct mlx5_ib_mr *mr; 1146 __be64 *pas; 1147 void *mkc; 1148 int inlen; 1149 u32 *in; 1150 int err; 1151 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); 1152 1153 mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL); 1154 if (!mr) 1155 return ERR_PTR(-ENOMEM); 1156 1157 mr->ibmr.pd = pd; 1158 mr->access_flags = access_flags; 1159 1160 inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1161 if (populate) 1162 inlen += sizeof(*pas) * roundup(npages, 2); 1163 in = kvzalloc(inlen, GFP_KERNEL); 1164 if (!in) { 1165 err = -ENOMEM; 1166 goto err_1; 1167 } 1168 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); 1169 if (populate && !(access_flags & IB_ACCESS_ON_DEMAND)) 1170 mlx5_ib_populate_pas(dev, umem, page_shift, pas, 1171 pg_cap ? MLX5_IB_MTT_PRESENT : 0); 1172 1173 /* The pg_access bit allows setting the access flags 1174 * in the page list submitted with the command. */ 1175 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); 1176 1177 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1178 MLX5_SET(mkc, mkc, free, !populate); 1179 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); 1180 MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); 1181 MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); 1182 MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ)); 1183 MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE)); 1184 MLX5_SET(mkc, mkc, lr, 1); 1185 MLX5_SET(mkc, mkc, umr_en, 1); 1186 1187 MLX5_SET64(mkc, mkc, start_addr, virt_addr); 1188 MLX5_SET64(mkc, mkc, len, length); 1189 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 1190 MLX5_SET(mkc, mkc, bsf_octword_size, 0); 1191 MLX5_SET(mkc, mkc, translations_octword_size, 1192 get_octo_len(virt_addr, length, page_shift)); 1193 MLX5_SET(mkc, mkc, log_page_size, page_shift); 1194 MLX5_SET(mkc, mkc, qpn, 0xffffff); 1195 if (populate) { 1196 MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 1197 get_octo_len(virt_addr, length, page_shift)); 1198 } 1199 1200 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); 1201 if (err) { 1202 mlx5_ib_warn(dev, "create mkey failed\n"); 1203 goto err_2; 1204 } 1205 mr->mmkey.type = MLX5_MKEY_MR; 1206 mr->desc_size = sizeof(struct mlx5_mtt); 1207 mr->dev = dev; 1208 kvfree(in); 1209 1210 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); 1211 1212 return mr; 1213 1214 err_2: 1215 kvfree(in); 1216 1217 err_1: 1218 if (!ibmr) 1219 kfree(mr); 1220 1221 return ERR_PTR(err); 1222 } 1223 1224 static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, 1225 int npages, u64 length, int access_flags) 1226 { 1227 mr->npages = npages; 1228 atomic_add(npages, &dev->mdev->priv.reg_pages); 1229 mr->ibmr.lkey = mr->mmkey.key; 1230 mr->ibmr.rkey = mr->mmkey.key; 1231 mr->ibmr.length = length; 1232 mr->access_flags = access_flags; 1233 } 1234 1235 static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr, 1236 u64 length, int acc) 1237 { 1238 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1239 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1240 struct mlx5_core_dev *mdev = dev->mdev; 1241 struct mlx5_ib_mr *mr; 1242 void *mkc; 1243 u32 *in; 1244 int err; 1245 1246 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1247 if (!mr) 1248 return ERR_PTR(-ENOMEM); 1249 1250 in = kzalloc(inlen, GFP_KERNEL); 1251 if (!in) { 1252 err = -ENOMEM; 1253 goto err_free; 1254 } 1255 1256 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1257 1258 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3); 1259 MLX5_SET(mkc, mkc, access_mode_4_2, 1260 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7); 1261 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); 1262 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); 1263 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); 1264 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); 1265 MLX5_SET(mkc, mkc, lr, 1); 1266 1267 MLX5_SET64(mkc, mkc, len, length); 1268 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 1269 MLX5_SET(mkc, mkc, qpn, 0xffffff); 1270 MLX5_SET64(mkc, mkc, start_addr, 1271 memic_addr - pci_resource_start(dev->mdev->pdev, 0)); 1272 1273 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); 1274 if (err) 1275 goto err_in; 1276 1277 kfree(in); 1278 1279 mr->umem = NULL; 1280 set_mr_fileds(dev, mr, 0, length, acc); 1281 1282 return &mr->ibmr; 1283 1284 err_in: 1285 kfree(in); 1286 1287 err_free: 1288 kfree(mr); 1289 1290 return ERR_PTR(err); 1291 } 1292 1293 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm, 1294 struct ib_dm_mr_attr *attr, 1295 struct uverbs_attr_bundle *attrs) 1296 { 1297 struct mlx5_ib_dm *mdm = to_mdm(dm); 1298 u64 memic_addr; 1299 1300 if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS) 1301 return ERR_PTR(-EINVAL); 1302 1303 memic_addr = mdm->dev_addr + attr->offset; 1304 1305 return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length, 1306 attr->access_flags); 1307 } 1308 1309 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 1310 u64 virt_addr, int access_flags, 1311 struct ib_udata *udata) 1312 { 1313 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1314 struct mlx5_ib_mr *mr = NULL; 1315 bool populate_mtts = false; 1316 struct ib_umem *umem; 1317 int page_shift; 1318 int npages; 1319 int ncont; 1320 int order; 1321 int err; 1322 1323 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) 1324 return ERR_PTR(-EOPNOTSUPP); 1325 1326 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", 1327 start, virt_addr, length, access_flags); 1328 1329 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1330 if (!start && length == U64_MAX) { 1331 if (!(access_flags & IB_ACCESS_ON_DEMAND) || 1332 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) 1333 return ERR_PTR(-EINVAL); 1334 1335 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); 1336 if (IS_ERR(mr)) 1337 return ERR_CAST(mr); 1338 return &mr->ibmr; 1339 } 1340 #endif 1341 1342 err = mr_umem_get(pd, start, length, access_flags, &umem, &npages, 1343 &page_shift, &ncont, &order); 1344 1345 if (err < 0) 1346 return ERR_PTR(err); 1347 1348 if (use_umr(dev, order)) { 1349 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, 1350 page_shift, order, access_flags); 1351 if (PTR_ERR(mr) == -EAGAIN) { 1352 mlx5_ib_dbg(dev, "cache empty for order %d\n", order); 1353 mr = NULL; 1354 } 1355 populate_mtts = false; 1356 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { 1357 if (access_flags & IB_ACCESS_ON_DEMAND) { 1358 err = -EINVAL; 1359 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); 1360 goto error; 1361 } 1362 populate_mtts = true; 1363 } 1364 1365 if (!mr) { 1366 if (!umr_can_modify_entity_size(dev)) 1367 populate_mtts = true; 1368 mutex_lock(&dev->slow_path_mutex); 1369 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, 1370 page_shift, access_flags, populate_mtts); 1371 mutex_unlock(&dev->slow_path_mutex); 1372 } 1373 1374 if (IS_ERR(mr)) { 1375 err = PTR_ERR(mr); 1376 goto error; 1377 } 1378 1379 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); 1380 1381 mr->umem = umem; 1382 set_mr_fileds(dev, mr, npages, length, access_flags); 1383 1384 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1385 update_odp_mr(mr); 1386 #endif 1387 1388 if (!populate_mtts) { 1389 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; 1390 1391 if (access_flags & IB_ACCESS_ON_DEMAND) 1392 update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP; 1393 1394 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, 1395 update_xlt_flags); 1396 1397 if (err) { 1398 dereg_mr(dev, mr); 1399 return ERR_PTR(err); 1400 } 1401 } 1402 1403 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1404 mr->live = 1; 1405 #endif 1406 return &mr->ibmr; 1407 error: 1408 ib_umem_release(umem); 1409 return ERR_PTR(err); 1410 } 1411 1412 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1413 { 1414 struct mlx5_core_dev *mdev = dev->mdev; 1415 struct mlx5_umr_wr umrwr = {}; 1416 1417 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 1418 return 0; 1419 1420 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | 1421 MLX5_IB_SEND_UMR_FAIL_IF_FREE; 1422 umrwr.wr.opcode = MLX5_IB_WR_UMR; 1423 umrwr.mkey = mr->mmkey.key; 1424 1425 return mlx5_ib_post_send_wait(dev, &umrwr); 1426 } 1427 1428 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, 1429 int access_flags, int flags) 1430 { 1431 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1432 struct mlx5_umr_wr umrwr = {}; 1433 int err; 1434 1435 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; 1436 1437 umrwr.wr.opcode = MLX5_IB_WR_UMR; 1438 umrwr.mkey = mr->mmkey.key; 1439 1440 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { 1441 umrwr.pd = pd; 1442 umrwr.access_flags = access_flags; 1443 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; 1444 } 1445 1446 err = mlx5_ib_post_send_wait(dev, &umrwr); 1447 1448 return err; 1449 } 1450 1451 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, 1452 u64 length, u64 virt_addr, int new_access_flags, 1453 struct ib_pd *new_pd, struct ib_udata *udata) 1454 { 1455 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); 1456 struct mlx5_ib_mr *mr = to_mmr(ib_mr); 1457 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd; 1458 int access_flags = flags & IB_MR_REREG_ACCESS ? 1459 new_access_flags : 1460 mr->access_flags; 1461 u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; 1462 u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; 1463 int page_shift = 0; 1464 int upd_flags = 0; 1465 int npages = 0; 1466 int ncont = 0; 1467 int order = 0; 1468 int err; 1469 1470 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", 1471 start, virt_addr, length, access_flags); 1472 1473 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); 1474 1475 if (flags != IB_MR_REREG_PD) { 1476 /* 1477 * Replace umem. This needs to be done whether or not UMR is 1478 * used. 1479 */ 1480 flags |= IB_MR_REREG_TRANS; 1481 ib_umem_release(mr->umem); 1482 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, 1483 &npages, &page_shift, &ncont, &order); 1484 if (err) 1485 goto err; 1486 } 1487 1488 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { 1489 /* 1490 * UMR can't be used - MKey needs to be replaced. 1491 */ 1492 if (mr->allocated_from_cache) 1493 err = unreg_umr(dev, mr); 1494 else 1495 err = destroy_mkey(dev, mr); 1496 if (err) 1497 goto err; 1498 1499 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, 1500 page_shift, access_flags, true); 1501 1502 if (IS_ERR(mr)) { 1503 err = PTR_ERR(mr); 1504 mr = to_mmr(ib_mr); 1505 goto err; 1506 } 1507 1508 mr->allocated_from_cache = 0; 1509 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1510 mr->live = 1; 1511 #endif 1512 } else { 1513 /* 1514 * Send a UMR WQE 1515 */ 1516 mr->ibmr.pd = pd; 1517 mr->access_flags = access_flags; 1518 mr->mmkey.iova = addr; 1519 mr->mmkey.size = len; 1520 mr->mmkey.pd = to_mpd(pd)->pdn; 1521 1522 if (flags & IB_MR_REREG_TRANS) { 1523 upd_flags = MLX5_IB_UPD_XLT_ADDR; 1524 if (flags & IB_MR_REREG_PD) 1525 upd_flags |= MLX5_IB_UPD_XLT_PD; 1526 if (flags & IB_MR_REREG_ACCESS) 1527 upd_flags |= MLX5_IB_UPD_XLT_ACCESS; 1528 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, 1529 upd_flags); 1530 } else { 1531 err = rereg_umr(pd, mr, access_flags, flags); 1532 } 1533 1534 if (err) 1535 goto err; 1536 } 1537 1538 set_mr_fileds(dev, mr, npages, len, access_flags); 1539 1540 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1541 update_odp_mr(mr); 1542 #endif 1543 return 0; 1544 1545 err: 1546 if (mr->umem) { 1547 ib_umem_release(mr->umem); 1548 mr->umem = NULL; 1549 } 1550 clean_mr(dev, mr); 1551 return err; 1552 } 1553 1554 static int 1555 mlx5_alloc_priv_descs(struct ib_device *device, 1556 struct mlx5_ib_mr *mr, 1557 int ndescs, 1558 int desc_size) 1559 { 1560 int size = ndescs * desc_size; 1561 int add_size; 1562 int ret; 1563 1564 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); 1565 1566 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); 1567 if (!mr->descs_alloc) 1568 return -ENOMEM; 1569 1570 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); 1571 1572 mr->desc_map = dma_map_single(device->dev.parent, mr->descs, 1573 size, DMA_TO_DEVICE); 1574 if (dma_mapping_error(device->dev.parent, mr->desc_map)) { 1575 ret = -ENOMEM; 1576 goto err; 1577 } 1578 1579 return 0; 1580 err: 1581 kfree(mr->descs_alloc); 1582 1583 return ret; 1584 } 1585 1586 static void 1587 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) 1588 { 1589 if (mr->descs) { 1590 struct ib_device *device = mr->ibmr.device; 1591 int size = mr->max_descs * mr->desc_size; 1592 1593 dma_unmap_single(device->dev.parent, mr->desc_map, 1594 size, DMA_TO_DEVICE); 1595 kfree(mr->descs_alloc); 1596 mr->descs = NULL; 1597 } 1598 } 1599 1600 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1601 { 1602 int allocated_from_cache = mr->allocated_from_cache; 1603 1604 if (mr->sig) { 1605 if (mlx5_core_destroy_psv(dev->mdev, 1606 mr->sig->psv_memory.psv_idx)) 1607 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1608 mr->sig->psv_memory.psv_idx); 1609 if (mlx5_core_destroy_psv(dev->mdev, 1610 mr->sig->psv_wire.psv_idx)) 1611 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1612 mr->sig->psv_wire.psv_idx); 1613 kfree(mr->sig); 1614 mr->sig = NULL; 1615 } 1616 1617 mlx5_free_priv_descs(mr); 1618 1619 if (!allocated_from_cache) 1620 destroy_mkey(dev, mr); 1621 } 1622 1623 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1624 { 1625 int npages = mr->npages; 1626 struct ib_umem *umem = mr->umem; 1627 1628 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1629 if (umem && umem->odp_data) { 1630 /* Prevent new page faults from succeeding */ 1631 mr->live = 0; 1632 /* Wait for all running page-fault handlers to finish. */ 1633 synchronize_srcu(&dev->mr_srcu); 1634 /* Destroy all page mappings */ 1635 if (umem->odp_data->page_list) 1636 mlx5_ib_invalidate_range(umem, ib_umem_start(umem), 1637 ib_umem_end(umem)); 1638 else 1639 mlx5_ib_free_implicit_mr(mr); 1640 /* 1641 * We kill the umem before the MR for ODP, 1642 * so that there will not be any invalidations in 1643 * flight, looking at the *mr struct. 1644 */ 1645 ib_umem_release(umem); 1646 atomic_sub(npages, &dev->mdev->priv.reg_pages); 1647 1648 /* Avoid double-freeing the umem. */ 1649 umem = NULL; 1650 } 1651 #endif 1652 1653 clean_mr(dev, mr); 1654 1655 if (umem) { 1656 ib_umem_release(umem); 1657 atomic_sub(npages, &dev->mdev->priv.reg_pages); 1658 } 1659 1660 if (!mr->allocated_from_cache) 1661 kfree(mr); 1662 else 1663 mlx5_mr_cache_free(dev, mr); 1664 } 1665 1666 int mlx5_ib_dereg_mr(struct ib_mr *ibmr) 1667 { 1668 dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr)); 1669 return 0; 1670 } 1671 1672 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, 1673 enum ib_mr_type mr_type, 1674 u32 max_num_sg) 1675 { 1676 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1677 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1678 int ndescs = ALIGN(max_num_sg, 4); 1679 struct mlx5_ib_mr *mr; 1680 void *mkc; 1681 u32 *in; 1682 int err; 1683 1684 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1685 if (!mr) 1686 return ERR_PTR(-ENOMEM); 1687 1688 in = kzalloc(inlen, GFP_KERNEL); 1689 if (!in) { 1690 err = -ENOMEM; 1691 goto err_free; 1692 } 1693 1694 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1695 MLX5_SET(mkc, mkc, free, 1); 1696 MLX5_SET(mkc, mkc, translations_octword_size, ndescs); 1697 MLX5_SET(mkc, mkc, qpn, 0xffffff); 1698 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 1699 1700 if (mr_type == IB_MR_TYPE_MEM_REG) { 1701 mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT; 1702 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); 1703 err = mlx5_alloc_priv_descs(pd->device, mr, 1704 ndescs, sizeof(struct mlx5_mtt)); 1705 if (err) 1706 goto err_free_in; 1707 1708 mr->desc_size = sizeof(struct mlx5_mtt); 1709 mr->max_descs = ndescs; 1710 } else if (mr_type == IB_MR_TYPE_SG_GAPS) { 1711 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; 1712 1713 err = mlx5_alloc_priv_descs(pd->device, mr, 1714 ndescs, sizeof(struct mlx5_klm)); 1715 if (err) 1716 goto err_free_in; 1717 mr->desc_size = sizeof(struct mlx5_klm); 1718 mr->max_descs = ndescs; 1719 } else if (mr_type == IB_MR_TYPE_SIGNATURE) { 1720 u32 psv_index[2]; 1721 1722 MLX5_SET(mkc, mkc, bsf_en, 1); 1723 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE); 1724 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); 1725 if (!mr->sig) { 1726 err = -ENOMEM; 1727 goto err_free_in; 1728 } 1729 1730 /* create mem & wire PSVs */ 1731 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 1732 2, psv_index); 1733 if (err) 1734 goto err_free_sig; 1735 1736 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; 1737 mr->sig->psv_memory.psv_idx = psv_index[0]; 1738 mr->sig->psv_wire.psv_idx = psv_index[1]; 1739 1740 mr->sig->sig_status_checked = true; 1741 mr->sig->sig_err_exists = false; 1742 /* Next UMR, Arm SIGERR */ 1743 ++mr->sig->sigerr_count; 1744 } else { 1745 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); 1746 err = -EINVAL; 1747 goto err_free_in; 1748 } 1749 1750 MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3); 1751 MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7); 1752 MLX5_SET(mkc, mkc, umr_en, 1); 1753 1754 mr->ibmr.device = pd->device; 1755 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); 1756 if (err) 1757 goto err_destroy_psv; 1758 1759 mr->mmkey.type = MLX5_MKEY_MR; 1760 mr->ibmr.lkey = mr->mmkey.key; 1761 mr->ibmr.rkey = mr->mmkey.key; 1762 mr->umem = NULL; 1763 kfree(in); 1764 1765 return &mr->ibmr; 1766 1767 err_destroy_psv: 1768 if (mr->sig) { 1769 if (mlx5_core_destroy_psv(dev->mdev, 1770 mr->sig->psv_memory.psv_idx)) 1771 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1772 mr->sig->psv_memory.psv_idx); 1773 if (mlx5_core_destroy_psv(dev->mdev, 1774 mr->sig->psv_wire.psv_idx)) 1775 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1776 mr->sig->psv_wire.psv_idx); 1777 } 1778 mlx5_free_priv_descs(mr); 1779 err_free_sig: 1780 kfree(mr->sig); 1781 err_free_in: 1782 kfree(in); 1783 err_free: 1784 kfree(mr); 1785 return ERR_PTR(err); 1786 } 1787 1788 struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, 1789 struct ib_udata *udata) 1790 { 1791 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1792 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1793 struct mlx5_ib_mw *mw = NULL; 1794 u32 *in = NULL; 1795 void *mkc; 1796 int ndescs; 1797 int err; 1798 struct mlx5_ib_alloc_mw req = {}; 1799 struct { 1800 __u32 comp_mask; 1801 __u32 response_length; 1802 } resp = {}; 1803 1804 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); 1805 if (err) 1806 return ERR_PTR(err); 1807 1808 if (req.comp_mask || req.reserved1 || req.reserved2) 1809 return ERR_PTR(-EOPNOTSUPP); 1810 1811 if (udata->inlen > sizeof(req) && 1812 !ib_is_udata_cleared(udata, sizeof(req), 1813 udata->inlen - sizeof(req))) 1814 return ERR_PTR(-EOPNOTSUPP); 1815 1816 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4); 1817 1818 mw = kzalloc(sizeof(*mw), GFP_KERNEL); 1819 in = kzalloc(inlen, GFP_KERNEL); 1820 if (!mw || !in) { 1821 err = -ENOMEM; 1822 goto free; 1823 } 1824 1825 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1826 1827 MLX5_SET(mkc, mkc, free, 1); 1828 MLX5_SET(mkc, mkc, translations_octword_size, ndescs); 1829 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 1830 MLX5_SET(mkc, mkc, umr_en, 1); 1831 MLX5_SET(mkc, mkc, lr, 1); 1832 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); 1833 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2))); 1834 MLX5_SET(mkc, mkc, qpn, 0xffffff); 1835 1836 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen); 1837 if (err) 1838 goto free; 1839 1840 mw->mmkey.type = MLX5_MKEY_MW; 1841 mw->ibmw.rkey = mw->mmkey.key; 1842 mw->ndescs = ndescs; 1843 1844 resp.response_length = min(offsetof(typeof(resp), response_length) + 1845 sizeof(resp.response_length), udata->outlen); 1846 if (resp.response_length) { 1847 err = ib_copy_to_udata(udata, &resp, resp.response_length); 1848 if (err) { 1849 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); 1850 goto free; 1851 } 1852 } 1853 1854 kfree(in); 1855 return &mw->ibmw; 1856 1857 free: 1858 kfree(mw); 1859 kfree(in); 1860 return ERR_PTR(err); 1861 } 1862 1863 int mlx5_ib_dealloc_mw(struct ib_mw *mw) 1864 { 1865 struct mlx5_ib_mw *mmw = to_mmw(mw); 1866 int err; 1867 1868 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev, 1869 &mmw->mmkey); 1870 if (!err) 1871 kfree(mmw); 1872 return err; 1873 } 1874 1875 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, 1876 struct ib_mr_status *mr_status) 1877 { 1878 struct mlx5_ib_mr *mmr = to_mmr(ibmr); 1879 int ret = 0; 1880 1881 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) { 1882 pr_err("Invalid status check mask\n"); 1883 ret = -EINVAL; 1884 goto done; 1885 } 1886 1887 mr_status->fail_status = 0; 1888 if (check_mask & IB_MR_CHECK_SIG_STATUS) { 1889 if (!mmr->sig) { 1890 ret = -EINVAL; 1891 pr_err("signature status check requested on a non-signature enabled MR\n"); 1892 goto done; 1893 } 1894 1895 mmr->sig->sig_status_checked = true; 1896 if (!mmr->sig->sig_err_exists) 1897 goto done; 1898 1899 if (ibmr->lkey == mmr->sig->err_item.key) 1900 memcpy(&mr_status->sig_err, &mmr->sig->err_item, 1901 sizeof(mr_status->sig_err)); 1902 else { 1903 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; 1904 mr_status->sig_err.sig_err_offset = 0; 1905 mr_status->sig_err.key = mmr->sig->err_item.key; 1906 } 1907 1908 mmr->sig->sig_err_exists = false; 1909 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; 1910 } 1911 1912 done: 1913 return ret; 1914 } 1915 1916 static int 1917 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, 1918 struct scatterlist *sgl, 1919 unsigned short sg_nents, 1920 unsigned int *sg_offset_p) 1921 { 1922 struct scatterlist *sg = sgl; 1923 struct mlx5_klm *klms = mr->descs; 1924 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; 1925 u32 lkey = mr->ibmr.pd->local_dma_lkey; 1926 int i; 1927 1928 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; 1929 mr->ibmr.length = 0; 1930 1931 for_each_sg(sgl, sg, sg_nents, i) { 1932 if (unlikely(i >= mr->max_descs)) 1933 break; 1934 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); 1935 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); 1936 klms[i].key = cpu_to_be32(lkey); 1937 mr->ibmr.length += sg_dma_len(sg) - sg_offset; 1938 1939 sg_offset = 0; 1940 } 1941 mr->ndescs = i; 1942 1943 if (sg_offset_p) 1944 *sg_offset_p = sg_offset; 1945 1946 return i; 1947 } 1948 1949 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) 1950 { 1951 struct mlx5_ib_mr *mr = to_mmr(ibmr); 1952 __be64 *descs; 1953 1954 if (unlikely(mr->ndescs == mr->max_descs)) 1955 return -ENOMEM; 1956 1957 descs = mr->descs; 1958 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); 1959 1960 return 0; 1961 } 1962 1963 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, 1964 unsigned int *sg_offset) 1965 { 1966 struct mlx5_ib_mr *mr = to_mmr(ibmr); 1967 int n; 1968 1969 mr->ndescs = 0; 1970 1971 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, 1972 mr->desc_size * mr->max_descs, 1973 DMA_TO_DEVICE); 1974 1975 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) 1976 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset); 1977 else 1978 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 1979 mlx5_set_page); 1980 1981 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, 1982 mr->desc_size * mr->max_descs, 1983 DMA_TO_DEVICE); 1984 1985 return n; 1986 } 1987