/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, to ensure that the
		 * MR initialization has finished before we start
		 * handling invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line.
		 * Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
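			/* The free list is already empty; nothing left to trim. */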
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
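	/*
	 * Refill the entry towards twice its configured limit.  On -EAGAIN
	 * (too many pending asynchronous mkey creations) the work is retried
	 * after a short delay, other errors back off for a second, and a
	 * successful addition requeues the work immediately until the
	 * watermark is reached.
	 */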
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to be run when
		 * no other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to try to run in
		 * the next cycle, in order to free CPU resources for other
		 * tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq,
&ent->work); 520 break; 521 } 522 spin_unlock_irq(&ent->lock); 523 524 queue_work(cache->wq, &ent->work); 525 } 526 527 if (!mr) 528 cache->ent[c].miss++; 529 530 return mr; 531 } 532 533 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 534 { 535 struct mlx5_mr_cache *cache = &dev->cache; 536 struct mlx5_cache_ent *ent; 537 int shrink = 0; 538 int c; 539 540 c = order2idx(dev, mr->order); 541 if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { 542 mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); 543 return; 544 } 545 546 if (unreg_umr(dev, mr)) 547 return; 548 549 ent = &cache->ent[c]; 550 spin_lock_irq(&ent->lock); 551 list_add_tail(&mr->list, &ent->head); 552 ent->cur++; 553 if (ent->cur > 2 * ent->limit) 554 shrink = 1; 555 spin_unlock_irq(&ent->lock); 556 557 if (shrink) 558 queue_work(cache->wq, &ent->work); 559 } 560 561 static void clean_keys(struct mlx5_ib_dev *dev, int c) 562 { 563 struct mlx5_mr_cache *cache = &dev->cache; 564 struct mlx5_cache_ent *ent = &cache->ent[c]; 565 struct mlx5_ib_mr *mr; 566 int err; 567 568 cancel_delayed_work(&ent->dwork); 569 while (1) { 570 spin_lock_irq(&ent->lock); 571 if (list_empty(&ent->head)) { 572 spin_unlock_irq(&ent->lock); 573 return; 574 } 575 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); 576 list_del(&mr->list); 577 ent->cur--; 578 ent->size--; 579 spin_unlock_irq(&ent->lock); 580 err = destroy_mkey(dev, mr); 581 if (err) 582 mlx5_ib_warn(dev, "failed destroy mkey\n"); 583 else 584 kfree(mr); 585 } 586 } 587 588 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) 589 { 590 if (!mlx5_debugfs_root) 591 return; 592 593 debugfs_remove_recursive(dev->cache.root); 594 dev->cache.root = NULL; 595 } 596 597 static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) 598 { 599 struct mlx5_mr_cache *cache = &dev->cache; 600 struct mlx5_cache_ent *ent; 601 int i; 602 603 if (!mlx5_debugfs_root) 604 return 0; 605 606 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); 607 if (!cache->root) 608 return -ENOMEM; 609 610 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 611 ent = &cache->ent[i]; 612 sprintf(ent->name, "%d", ent->order); 613 ent->dir = debugfs_create_dir(ent->name, cache->root); 614 if (!ent->dir) 615 goto err; 616 617 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, 618 &size_fops); 619 if (!ent->fsize) 620 goto err; 621 622 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, 623 &limit_fops); 624 if (!ent->flimit) 625 goto err; 626 627 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, 628 &ent->cur); 629 if (!ent->fcur) 630 goto err; 631 632 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, 633 &ent->miss); 634 if (!ent->fmiss) 635 goto err; 636 } 637 638 return 0; 639 err: 640 mlx5_mr_cache_debugfs_cleanup(dev); 641 642 return -ENOMEM; 643 } 644 645 static void delay_time_func(struct timer_list *t) 646 { 647 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer); 648 649 dev->fill_delay = 0; 650 } 651 652 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) 653 { 654 struct mlx5_mr_cache *cache = &dev->cache; 655 struct mlx5_cache_ent *ent; 656 int err; 657 int i; 658 659 mutex_init(&dev->slow_path_mutex); 660 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 661 if (!cache->wq) { 662 mlx5_ib_warn(dev, "failed to create work queue\n"); 663 return -ENOMEM; 664 } 665 666 timer_setup(&dev->delay_timer, delay_time_func, 0); 667 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 668 ent = &cache->ent[i]; 669 
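		/*
		 * Entry i caches mkeys of order i + 2, i.e. translation for
		 * up to 2^(i + 2) pages each.  Entries past
		 * MR_CACHE_LAST_STD_ENTRY are set up for ODP by
		 * mlx5_odp_init_mr_cache_entry() below.
		 */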
INIT_LIST_HEAD(&ent->head); 670 spin_lock_init(&ent->lock); 671 ent->order = i + 2; 672 ent->dev = dev; 673 ent->limit = 0; 674 675 init_completion(&ent->compl); 676 INIT_WORK(&ent->work, cache_work_func); 677 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); 678 queue_work(cache->wq, &ent->work); 679 680 if (i > MR_CACHE_LAST_STD_ENTRY) { 681 mlx5_odp_init_mr_cache_entry(ent); 682 continue; 683 } 684 685 if (ent->order > mr_cache_max_order(dev)) 686 continue; 687 688 ent->page = PAGE_SHIFT; 689 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / 690 MLX5_IB_UMR_OCTOWORD; 691 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; 692 if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && 693 mlx5_core_is_pf(dev->mdev)) 694 ent->limit = dev->mdev->profile->mr_cache[i].limit; 695 else 696 ent->limit = 0; 697 } 698 699 err = mlx5_mr_cache_debugfs_init(dev); 700 if (err) 701 mlx5_ib_warn(dev, "cache debugfs failure\n"); 702 703 /* 704 * We don't want to fail driver if debugfs failed to initialize, 705 * so we are not forwarding error to the user. 706 */ 707 708 return 0; 709 } 710 711 static void wait_for_async_commands(struct mlx5_ib_dev *dev) 712 { 713 struct mlx5_mr_cache *cache = &dev->cache; 714 struct mlx5_cache_ent *ent; 715 int total = 0; 716 int i; 717 int j; 718 719 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 720 ent = &cache->ent[i]; 721 for (j = 0 ; j < 1000; j++) { 722 if (!ent->pending) 723 break; 724 msleep(50); 725 } 726 } 727 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { 728 ent = &cache->ent[i]; 729 total += ent->pending; 730 } 731 732 if (total) 733 mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total); 734 else 735 mlx5_ib_warn(dev, "done with all pending requests\n"); 736 } 737 738 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) 739 { 740 int i; 741 742 dev->cache.stopped = 1; 743 flush_workqueue(dev->cache.wq); 744 745 mlx5_mr_cache_debugfs_cleanup(dev); 746 747 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) 748 clean_keys(dev, i); 749 750 destroy_workqueue(dev->cache.wq); 751 wait_for_async_commands(dev); 752 del_timer_sync(&dev->delay_timer); 753 754 return 0; 755 } 756 757 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) 758 { 759 struct mlx5_ib_dev *dev = to_mdev(pd->device); 760 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 761 struct mlx5_core_dev *mdev = dev->mdev; 762 struct mlx5_ib_mr *mr; 763 void *mkc; 764 u32 *in; 765 int err; 766 767 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 768 if (!mr) 769 return ERR_PTR(-ENOMEM); 770 771 in = kzalloc(inlen, GFP_KERNEL); 772 if (!in) { 773 err = -ENOMEM; 774 goto err_free; 775 } 776 777 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 778 779 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA); 780 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC)); 781 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE)); 782 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ)); 783 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE)); 784 MLX5_SET(mkc, mkc, lr, 1); 785 786 MLX5_SET(mkc, mkc, length64, 1); 787 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 788 MLX5_SET(mkc, mkc, qpn, 0xffffff); 789 MLX5_SET64(mkc, mkc, start_addr, 0); 790 791 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); 792 if (err) 793 goto err_in; 794 795 kfree(in); 796 mr->mmkey.type = MLX5_MKEY_MR; 797 mr->ibmr.lkey = mr->mmkey.key; 798 mr->ibmr.rkey = mr->mmkey.key; 799 mr->umem = NULL; 800 801 return &mr->ibmr; 802 803 err_in: 804 kfree(in); 805 806 err_free: 807 kfree(mr); 808 809 return 
ERR_PTR(err); 810 } 811 812 static int get_octo_len(u64 addr, u64 len, int page_shift) 813 { 814 u64 page_size = 1ULL << page_shift; 815 u64 offset; 816 int npages; 817 818 offset = addr & (page_size - 1); 819 npages = ALIGN(len + offset, page_size) >> page_shift; 820 return (npages + 1) / 2; 821 } 822 823 static int mr_cache_max_order(struct mlx5_ib_dev *dev) 824 { 825 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 826 return MR_CACHE_LAST_STD_ENTRY + 2; 827 return MLX5_MAX_UMR_SHIFT; 828 } 829 830 static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, 831 int access_flags, struct ib_umem **umem, 832 int *npages, int *page_shift, int *ncont, 833 int *order) 834 { 835 struct mlx5_ib_dev *dev = to_mdev(pd->device); 836 int err; 837 838 *umem = ib_umem_get(pd->uobject->context, start, length, 839 access_flags, 0); 840 err = PTR_ERR_OR_ZERO(*umem); 841 if (err < 0) { 842 mlx5_ib_err(dev, "umem get failed (%d)\n", err); 843 return err; 844 } 845 846 mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, 847 page_shift, ncont, order); 848 if (!*npages) { 849 mlx5_ib_warn(dev, "avoid zero region\n"); 850 ib_umem_release(*umem); 851 return -EINVAL; 852 } 853 854 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", 855 *npages, *ncont, *order, *page_shift); 856 857 return 0; 858 } 859 860 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) 861 { 862 struct mlx5_ib_umr_context *context = 863 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); 864 865 context->status = wc->status; 866 complete(&context->done); 867 } 868 869 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) 870 { 871 context->cqe.done = mlx5_ib_umr_done; 872 context->status = -1; 873 init_completion(&context->done); 874 } 875 876 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, 877 struct mlx5_umr_wr *umrwr) 878 { 879 struct umr_common *umrc = &dev->umrc; 880 struct ib_send_wr *bad; 881 int err; 882 struct mlx5_ib_umr_context umr_context; 883 884 mlx5_ib_init_umr_context(&umr_context); 885 umrwr->wr.wr_cqe = &umr_context.cqe; 886 887 down(&umrc->sem); 888 err = ib_post_send(umrc->qp, &umrwr->wr, &bad); 889 if (err) { 890 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); 891 } else { 892 wait_for_completion(&umr_context.done); 893 if (umr_context.status != IB_WC_SUCCESS) { 894 mlx5_ib_warn(dev, "reg umr failed (%u)\n", 895 umr_context.status); 896 err = -EFAULT; 897 } 898 } 899 up(&umrc->sem); 900 return err; 901 } 902 903 static struct mlx5_ib_mr *alloc_mr_from_cache( 904 struct ib_pd *pd, struct ib_umem *umem, 905 u64 virt_addr, u64 len, int npages, 906 int page_shift, int order, int access_flags) 907 { 908 struct mlx5_ib_dev *dev = to_mdev(pd->device); 909 struct mlx5_ib_mr *mr; 910 int err = 0; 911 int i; 912 913 for (i = 0; i < 1; i++) { 914 mr = alloc_cached_mr(dev, order); 915 if (mr) 916 break; 917 918 err = add_keys(dev, order2idx(dev, order), 1); 919 if (err && err != -EAGAIN) { 920 mlx5_ib_warn(dev, "add_keys failed, err %d\n", err); 921 break; 922 } 923 } 924 925 if (!mr) 926 return ERR_PTR(-EAGAIN); 927 928 mr->ibmr.pd = pd; 929 mr->umem = umem; 930 mr->access_flags = access_flags; 931 mr->desc_size = sizeof(struct mlx5_mtt); 932 mr->mmkey.iova = virt_addr; 933 mr->mmkey.size = len; 934 mr->mmkey.pd = to_mpd(pd)->pdn; 935 936 return mr; 937 } 938 939 static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages, 940 void *xlt, int page_shift, size_t size, 941 int flags) 942 { 943 struct 
mlx5_ib_dev *dev = mr->dev; 944 struct ib_umem *umem = mr->umem; 945 if (flags & MLX5_IB_UPD_XLT_INDIRECT) { 946 mlx5_odp_populate_klm(xlt, idx, npages, mr, flags); 947 return npages; 948 } 949 950 npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx); 951 952 if (!(flags & MLX5_IB_UPD_XLT_ZAP)) { 953 __mlx5_ib_populate_pas(dev, umem, page_shift, 954 idx, npages, xlt, 955 MLX5_IB_MTT_PRESENT); 956 /* Clear padding after the pages 957 * brought from the umem. 958 */ 959 memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0, 960 size - npages * sizeof(struct mlx5_mtt)); 961 } 962 963 return npages; 964 } 965 966 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \ 967 MLX5_UMR_MTT_ALIGNMENT) 968 #define MLX5_SPARE_UMR_CHUNK 0x10000 969 970 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, 971 int page_shift, int flags) 972 { 973 struct mlx5_ib_dev *dev = mr->dev; 974 struct device *ddev = dev->ib_dev.dev.parent; 975 struct mlx5_ib_ucontext *uctx = NULL; 976 int size; 977 void *xlt; 978 dma_addr_t dma; 979 struct mlx5_umr_wr wr; 980 struct ib_sge sg; 981 int err = 0; 982 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT) 983 ? sizeof(struct mlx5_klm) 984 : sizeof(struct mlx5_mtt); 985 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size; 986 const int page_mask = page_align - 1; 987 size_t pages_mapped = 0; 988 size_t pages_to_map = 0; 989 size_t pages_iter = 0; 990 gfp_t gfp; 991 992 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes, 993 * so we need to align the offset and length accordingly 994 */ 995 if (idx & page_mask) { 996 npages += idx & page_mask; 997 idx &= ~page_mask; 998 } 999 1000 gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL; 1001 gfp |= __GFP_ZERO | __GFP_NOWARN; 1002 1003 pages_to_map = ALIGN(npages, page_align); 1004 size = desc_size * pages_to_map; 1005 size = min_t(int, size, MLX5_MAX_UMR_CHUNK); 1006 1007 xlt = (void *)__get_free_pages(gfp, get_order(size)); 1008 if (!xlt && size > MLX5_SPARE_UMR_CHUNK) { 1009 mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. 
fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		uctx = to_mucontext(mr->ibmr.pd->uobject->context);
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		size = PAGE_SIZE;
		xlt = (void *)uctx->upd_xlt_page;
		mutex_lock(&uctx->upd_xlt_page_mutex);
		memset(xlt, 0, size);
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (uctx)
		mutex_unlock(&uctx->upd_xlt_page_mutex);
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ?
to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL); 1114 if (!mr) 1115 return ERR_PTR(-ENOMEM); 1116 1117 mr->ibmr.pd = pd; 1118 mr->access_flags = access_flags; 1119 1120 inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1121 if (populate) 1122 inlen += sizeof(*pas) * roundup(npages, 2); 1123 in = kvzalloc(inlen, GFP_KERNEL); 1124 if (!in) { 1125 err = -ENOMEM; 1126 goto err_1; 1127 } 1128 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); 1129 if (populate && !(access_flags & IB_ACCESS_ON_DEMAND)) 1130 mlx5_ib_populate_pas(dev, umem, page_shift, pas, 1131 pg_cap ? MLX5_IB_MTT_PRESENT : 0); 1132 1133 /* The pg_access bit allows setting the access flags 1134 * in the page list submitted with the command. */ 1135 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); 1136 1137 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1138 MLX5_SET(mkc, mkc, free, !populate); 1139 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); 1140 MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); 1141 MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); 1142 MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ)); 1143 MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE)); 1144 MLX5_SET(mkc, mkc, lr, 1); 1145 MLX5_SET(mkc, mkc, umr_en, 1); 1146 1147 MLX5_SET64(mkc, mkc, start_addr, virt_addr); 1148 MLX5_SET64(mkc, mkc, len, length); 1149 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 1150 MLX5_SET(mkc, mkc, bsf_octword_size, 0); 1151 MLX5_SET(mkc, mkc, translations_octword_size, 1152 get_octo_len(virt_addr, length, page_shift)); 1153 MLX5_SET(mkc, mkc, log_page_size, page_shift); 1154 MLX5_SET(mkc, mkc, qpn, 0xffffff); 1155 if (populate) { 1156 MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 1157 get_octo_len(virt_addr, length, page_shift)); 1158 } 1159 1160 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); 1161 if (err) { 1162 mlx5_ib_warn(dev, "create mkey failed\n"); 1163 goto err_2; 1164 } 1165 mr->mmkey.type = MLX5_MKEY_MR; 1166 mr->desc_size = sizeof(struct mlx5_mtt); 1167 mr->dev = dev; 1168 kvfree(in); 1169 1170 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); 1171 1172 return mr; 1173 1174 err_2: 1175 kvfree(in); 1176 1177 err_1: 1178 if (!ibmr) 1179 kfree(mr); 1180 1181 return ERR_PTR(err); 1182 } 1183 1184 static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, 1185 int npages, u64 length, int access_flags) 1186 { 1187 mr->npages = npages; 1188 atomic_add(npages, &dev->mdev->priv.reg_pages); 1189 mr->ibmr.lkey = mr->mmkey.key; 1190 mr->ibmr.rkey = mr->mmkey.key; 1191 mr->ibmr.length = length; 1192 mr->access_flags = access_flags; 1193 } 1194 1195 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 1196 u64 virt_addr, int access_flags, 1197 struct ib_udata *udata) 1198 { 1199 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1200 struct mlx5_ib_mr *mr = NULL; 1201 struct ib_umem *umem; 1202 int page_shift; 1203 int npages; 1204 int ncont; 1205 int order; 1206 int err; 1207 bool use_umr = true; 1208 1209 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) 1210 return ERR_PTR(-EINVAL); 1211 1212 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", 1213 start, virt_addr, length, access_flags); 1214 1215 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1216 if (!start && length == U64_MAX) { 1217 if (!(access_flags & IB_ACCESS_ON_DEMAND) || 1218 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) 1219 return ERR_PTR(-EINVAL); 1220 1221 mr = 
mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); 1222 return &mr->ibmr; 1223 } 1224 #endif 1225 1226 err = mr_umem_get(pd, start, length, access_flags, &umem, &npages, 1227 &page_shift, &ncont, &order); 1228 1229 if (err < 0) 1230 return ERR_PTR(err); 1231 1232 if (order <= mr_cache_max_order(dev)) { 1233 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, 1234 page_shift, order, access_flags); 1235 if (PTR_ERR(mr) == -EAGAIN) { 1236 mlx5_ib_dbg(dev, "cache empty for order %d\n", order); 1237 mr = NULL; 1238 } 1239 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { 1240 if (access_flags & IB_ACCESS_ON_DEMAND) { 1241 err = -EINVAL; 1242 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); 1243 goto error; 1244 } 1245 use_umr = false; 1246 } 1247 1248 if (!mr) { 1249 mutex_lock(&dev->slow_path_mutex); 1250 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, 1251 page_shift, access_flags, !use_umr); 1252 mutex_unlock(&dev->slow_path_mutex); 1253 } 1254 1255 if (IS_ERR(mr)) { 1256 err = PTR_ERR(mr); 1257 goto error; 1258 } 1259 1260 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); 1261 1262 mr->umem = umem; 1263 set_mr_fileds(dev, mr, npages, length, access_flags); 1264 1265 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1266 update_odp_mr(mr); 1267 #endif 1268 1269 if (use_umr) { 1270 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; 1271 1272 if (access_flags & IB_ACCESS_ON_DEMAND) 1273 update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP; 1274 1275 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, 1276 update_xlt_flags); 1277 1278 if (err) { 1279 dereg_mr(dev, mr); 1280 return ERR_PTR(err); 1281 } 1282 } 1283 1284 mr->live = 1; 1285 return &mr->ibmr; 1286 error: 1287 ib_umem_release(umem); 1288 return ERR_PTR(err); 1289 } 1290 1291 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1292 { 1293 struct mlx5_core_dev *mdev = dev->mdev; 1294 struct mlx5_umr_wr umrwr = {}; 1295 1296 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 1297 return 0; 1298 1299 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | 1300 MLX5_IB_SEND_UMR_FAIL_IF_FREE; 1301 umrwr.wr.opcode = MLX5_IB_WR_UMR; 1302 umrwr.mkey = mr->mmkey.key; 1303 1304 return mlx5_ib_post_send_wait(dev, &umrwr); 1305 } 1306 1307 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, 1308 int access_flags, int flags) 1309 { 1310 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1311 struct mlx5_umr_wr umrwr = {}; 1312 int err; 1313 1314 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; 1315 1316 umrwr.wr.opcode = MLX5_IB_WR_UMR; 1317 umrwr.mkey = mr->mmkey.key; 1318 1319 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) { 1320 umrwr.pd = pd; 1321 umrwr.access_flags = access_flags; 1322 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; 1323 } 1324 1325 err = mlx5_ib_post_send_wait(dev, &umrwr); 1326 1327 return err; 1328 } 1329 1330 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, 1331 u64 length, u64 virt_addr, int new_access_flags, 1332 struct ib_pd *new_pd, struct ib_udata *udata) 1333 { 1334 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); 1335 struct mlx5_ib_mr *mr = to_mmr(ib_mr); 1336 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd; 1337 int access_flags = flags & IB_MR_REREG_ACCESS ? 1338 new_access_flags : 1339 mr->access_flags; 1340 u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; 1341 u64 len = (flags & IB_MR_REREG_TRANS) ? 
length : mr->umem->length; 1342 int page_shift = 0; 1343 int upd_flags = 0; 1344 int npages = 0; 1345 int ncont = 0; 1346 int order = 0; 1347 int err; 1348 1349 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", 1350 start, virt_addr, length, access_flags); 1351 1352 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); 1353 1354 if (flags != IB_MR_REREG_PD) { 1355 /* 1356 * Replace umem. This needs to be done whether or not UMR is 1357 * used. 1358 */ 1359 flags |= IB_MR_REREG_TRANS; 1360 ib_umem_release(mr->umem); 1361 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, 1362 &npages, &page_shift, &ncont, &order); 1363 if (err < 0) { 1364 clean_mr(dev, mr); 1365 return err; 1366 } 1367 } 1368 1369 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { 1370 /* 1371 * UMR can't be used - MKey needs to be replaced. 1372 */ 1373 if (mr->allocated_from_cache) { 1374 err = unreg_umr(dev, mr); 1375 if (err) 1376 mlx5_ib_warn(dev, "Failed to unregister MR\n"); 1377 } else { 1378 err = destroy_mkey(dev, mr); 1379 if (err) 1380 mlx5_ib_warn(dev, "Failed to destroy MKey\n"); 1381 } 1382 if (err) 1383 return err; 1384 1385 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, 1386 page_shift, access_flags, true); 1387 1388 if (IS_ERR(mr)) 1389 return PTR_ERR(mr); 1390 1391 mr->allocated_from_cache = 0; 1392 mr->live = 1; 1393 } else { 1394 /* 1395 * Send a UMR WQE 1396 */ 1397 mr->ibmr.pd = pd; 1398 mr->access_flags = access_flags; 1399 mr->mmkey.iova = addr; 1400 mr->mmkey.size = len; 1401 mr->mmkey.pd = to_mpd(pd)->pdn; 1402 1403 if (flags & IB_MR_REREG_TRANS) { 1404 upd_flags = MLX5_IB_UPD_XLT_ADDR; 1405 if (flags & IB_MR_REREG_PD) 1406 upd_flags |= MLX5_IB_UPD_XLT_PD; 1407 if (flags & IB_MR_REREG_ACCESS) 1408 upd_flags |= MLX5_IB_UPD_XLT_ACCESS; 1409 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, 1410 upd_flags); 1411 } else { 1412 err = rereg_umr(pd, mr, access_flags, flags); 1413 } 1414 1415 if (err) { 1416 mlx5_ib_warn(dev, "Failed to rereg UMR\n"); 1417 ib_umem_release(mr->umem); 1418 clean_mr(dev, mr); 1419 return err; 1420 } 1421 } 1422 1423 set_mr_fileds(dev, mr, npages, len, access_flags); 1424 1425 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1426 update_odp_mr(mr); 1427 #endif 1428 return 0; 1429 } 1430 1431 static int 1432 mlx5_alloc_priv_descs(struct ib_device *device, 1433 struct mlx5_ib_mr *mr, 1434 int ndescs, 1435 int desc_size) 1436 { 1437 int size = ndescs * desc_size; 1438 int add_size; 1439 int ret; 1440 1441 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); 1442 1443 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); 1444 if (!mr->descs_alloc) 1445 return -ENOMEM; 1446 1447 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); 1448 1449 mr->desc_map = dma_map_single(device->dev.parent, mr->descs, 1450 size, DMA_TO_DEVICE); 1451 if (dma_mapping_error(device->dev.parent, mr->desc_map)) { 1452 ret = -ENOMEM; 1453 goto err; 1454 } 1455 1456 return 0; 1457 err: 1458 kfree(mr->descs_alloc); 1459 1460 return ret; 1461 } 1462 1463 static void 1464 mlx5_free_priv_descs(struct mlx5_ib_mr *mr) 1465 { 1466 if (mr->descs) { 1467 struct ib_device *device = mr->ibmr.device; 1468 int size = mr->max_descs * mr->desc_size; 1469 1470 dma_unmap_single(device->dev.parent, mr->desc_map, 1471 size, DMA_TO_DEVICE); 1472 kfree(mr->descs_alloc); 1473 mr->descs = NULL; 1474 } 1475 } 1476 1477 static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1478 { 1479 int allocated_from_cache = 
mr->allocated_from_cache; 1480 int err; 1481 1482 if (mr->sig) { 1483 if (mlx5_core_destroy_psv(dev->mdev, 1484 mr->sig->psv_memory.psv_idx)) 1485 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1486 mr->sig->psv_memory.psv_idx); 1487 if (mlx5_core_destroy_psv(dev->mdev, 1488 mr->sig->psv_wire.psv_idx)) 1489 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1490 mr->sig->psv_wire.psv_idx); 1491 kfree(mr->sig); 1492 mr->sig = NULL; 1493 } 1494 1495 mlx5_free_priv_descs(mr); 1496 1497 if (!allocated_from_cache) { 1498 u32 key = mr->mmkey.key; 1499 1500 err = destroy_mkey(dev, mr); 1501 kfree(mr); 1502 if (err) { 1503 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", 1504 key, err); 1505 return err; 1506 } 1507 } else { 1508 mlx5_mr_cache_free(dev, mr); 1509 } 1510 1511 return 0; 1512 } 1513 1514 static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) 1515 { 1516 int npages = mr->npages; 1517 struct ib_umem *umem = mr->umem; 1518 1519 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1520 if (umem && umem->odp_data) { 1521 /* Prevent new page faults from succeeding */ 1522 mr->live = 0; 1523 /* Wait for all running page-fault handlers to finish. */ 1524 synchronize_srcu(&dev->mr_srcu); 1525 /* Destroy all page mappings */ 1526 if (umem->odp_data->page_list) 1527 mlx5_ib_invalidate_range(umem, ib_umem_start(umem), 1528 ib_umem_end(umem)); 1529 else 1530 mlx5_ib_free_implicit_mr(mr); 1531 /* 1532 * We kill the umem before the MR for ODP, 1533 * so that there will not be any invalidations in 1534 * flight, looking at the *mr struct. 1535 */ 1536 ib_umem_release(umem); 1537 atomic_sub(npages, &dev->mdev->priv.reg_pages); 1538 1539 /* Avoid double-freeing the umem. */ 1540 umem = NULL; 1541 } 1542 #endif 1543 1544 clean_mr(dev, mr); 1545 1546 if (umem) { 1547 ib_umem_release(umem); 1548 atomic_sub(npages, &dev->mdev->priv.reg_pages); 1549 } 1550 1551 return 0; 1552 } 1553 1554 int mlx5_ib_dereg_mr(struct ib_mr *ibmr) 1555 { 1556 struct mlx5_ib_dev *dev = to_mdev(ibmr->device); 1557 struct mlx5_ib_mr *mr = to_mmr(ibmr); 1558 1559 return dereg_mr(dev, mr); 1560 } 1561 1562 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, 1563 enum ib_mr_type mr_type, 1564 u32 max_num_sg) 1565 { 1566 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1567 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1568 int ndescs = ALIGN(max_num_sg, 4); 1569 struct mlx5_ib_mr *mr; 1570 void *mkc; 1571 u32 *in; 1572 int err; 1573 1574 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1575 if (!mr) 1576 return ERR_PTR(-ENOMEM); 1577 1578 in = kzalloc(inlen, GFP_KERNEL); 1579 if (!in) { 1580 err = -ENOMEM; 1581 goto err_free; 1582 } 1583 1584 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1585 MLX5_SET(mkc, mkc, free, 1); 1586 MLX5_SET(mkc, mkc, translations_octword_size, ndescs); 1587 MLX5_SET(mkc, mkc, qpn, 0xffffff); 1588 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 1589 1590 if (mr_type == IB_MR_TYPE_MEM_REG) { 1591 mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT; 1592 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); 1593 err = mlx5_alloc_priv_descs(pd->device, mr, 1594 ndescs, sizeof(struct mlx5_mtt)); 1595 if (err) 1596 goto err_free_in; 1597 1598 mr->desc_size = sizeof(struct mlx5_mtt); 1599 mr->max_descs = ndescs; 1600 } else if (mr_type == IB_MR_TYPE_SG_GAPS) { 1601 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; 1602 1603 err = mlx5_alloc_priv_descs(pd->device, mr, 1604 ndescs, sizeof(struct mlx5_klm)); 1605 if (err) 1606 goto err_free_in; 1607 mr->desc_size = sizeof(struct mlx5_klm); 1608 mr->max_descs = ndescs; 1609 } 
else if (mr_type == IB_MR_TYPE_SIGNATURE) { 1610 u32 psv_index[2]; 1611 1612 MLX5_SET(mkc, mkc, bsf_en, 1); 1613 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE); 1614 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); 1615 if (!mr->sig) { 1616 err = -ENOMEM; 1617 goto err_free_in; 1618 } 1619 1620 /* create mem & wire PSVs */ 1621 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 1622 2, psv_index); 1623 if (err) 1624 goto err_free_sig; 1625 1626 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; 1627 mr->sig->psv_memory.psv_idx = psv_index[0]; 1628 mr->sig->psv_wire.psv_idx = psv_index[1]; 1629 1630 mr->sig->sig_status_checked = true; 1631 mr->sig->sig_err_exists = false; 1632 /* Next UMR, Arm SIGERR */ 1633 ++mr->sig->sigerr_count; 1634 } else { 1635 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); 1636 err = -EINVAL; 1637 goto err_free_in; 1638 } 1639 1640 MLX5_SET(mkc, mkc, access_mode, mr->access_mode); 1641 MLX5_SET(mkc, mkc, umr_en, 1); 1642 1643 mr->ibmr.device = pd->device; 1644 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); 1645 if (err) 1646 goto err_destroy_psv; 1647 1648 mr->mmkey.type = MLX5_MKEY_MR; 1649 mr->ibmr.lkey = mr->mmkey.key; 1650 mr->ibmr.rkey = mr->mmkey.key; 1651 mr->umem = NULL; 1652 kfree(in); 1653 1654 return &mr->ibmr; 1655 1656 err_destroy_psv: 1657 if (mr->sig) { 1658 if (mlx5_core_destroy_psv(dev->mdev, 1659 mr->sig->psv_memory.psv_idx)) 1660 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1661 mr->sig->psv_memory.psv_idx); 1662 if (mlx5_core_destroy_psv(dev->mdev, 1663 mr->sig->psv_wire.psv_idx)) 1664 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1665 mr->sig->psv_wire.psv_idx); 1666 } 1667 mlx5_free_priv_descs(mr); 1668 err_free_sig: 1669 kfree(mr->sig); 1670 err_free_in: 1671 kfree(in); 1672 err_free: 1673 kfree(mr); 1674 return ERR_PTR(err); 1675 } 1676 1677 struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, 1678 struct ib_udata *udata) 1679 { 1680 struct mlx5_ib_dev *dev = to_mdev(pd->device); 1681 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); 1682 struct mlx5_ib_mw *mw = NULL; 1683 u32 *in = NULL; 1684 void *mkc; 1685 int ndescs; 1686 int err; 1687 struct mlx5_ib_alloc_mw req = {}; 1688 struct { 1689 __u32 comp_mask; 1690 __u32 response_length; 1691 } resp = {}; 1692 1693 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); 1694 if (err) 1695 return ERR_PTR(err); 1696 1697 if (req.comp_mask || req.reserved1 || req.reserved2) 1698 return ERR_PTR(-EOPNOTSUPP); 1699 1700 if (udata->inlen > sizeof(req) && 1701 !ib_is_udata_cleared(udata, sizeof(req), 1702 udata->inlen - sizeof(req))) 1703 return ERR_PTR(-EOPNOTSUPP); 1704 1705 ndescs = req.num_klms ? 
roundup(req.num_klms, 4) : roundup(1, 4); 1706 1707 mw = kzalloc(sizeof(*mw), GFP_KERNEL); 1708 in = kzalloc(inlen, GFP_KERNEL); 1709 if (!mw || !in) { 1710 err = -ENOMEM; 1711 goto free; 1712 } 1713 1714 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 1715 1716 MLX5_SET(mkc, mkc, free, 1); 1717 MLX5_SET(mkc, mkc, translations_octword_size, ndescs); 1718 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); 1719 MLX5_SET(mkc, mkc, umr_en, 1); 1720 MLX5_SET(mkc, mkc, lr, 1); 1721 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS); 1722 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2))); 1723 MLX5_SET(mkc, mkc, qpn, 0xffffff); 1724 1725 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen); 1726 if (err) 1727 goto free; 1728 1729 mw->mmkey.type = MLX5_MKEY_MW; 1730 mw->ibmw.rkey = mw->mmkey.key; 1731 mw->ndescs = ndescs; 1732 1733 resp.response_length = min(offsetof(typeof(resp), response_length) + 1734 sizeof(resp.response_length), udata->outlen); 1735 if (resp.response_length) { 1736 err = ib_copy_to_udata(udata, &resp, resp.response_length); 1737 if (err) { 1738 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); 1739 goto free; 1740 } 1741 } 1742 1743 kfree(in); 1744 return &mw->ibmw; 1745 1746 free: 1747 kfree(mw); 1748 kfree(in); 1749 return ERR_PTR(err); 1750 } 1751 1752 int mlx5_ib_dealloc_mw(struct ib_mw *mw) 1753 { 1754 struct mlx5_ib_mw *mmw = to_mmw(mw); 1755 int err; 1756 1757 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev, 1758 &mmw->mmkey); 1759 if (!err) 1760 kfree(mmw); 1761 return err; 1762 } 1763 1764 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, 1765 struct ib_mr_status *mr_status) 1766 { 1767 struct mlx5_ib_mr *mmr = to_mmr(ibmr); 1768 int ret = 0; 1769 1770 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) { 1771 pr_err("Invalid status check mask\n"); 1772 ret = -EINVAL; 1773 goto done; 1774 } 1775 1776 mr_status->fail_status = 0; 1777 if (check_mask & IB_MR_CHECK_SIG_STATUS) { 1778 if (!mmr->sig) { 1779 ret = -EINVAL; 1780 pr_err("signature status check requested on a non-signature enabled MR\n"); 1781 goto done; 1782 } 1783 1784 mmr->sig->sig_status_checked = true; 1785 if (!mmr->sig->sig_err_exists) 1786 goto done; 1787 1788 if (ibmr->lkey == mmr->sig->err_item.key) 1789 memcpy(&mr_status->sig_err, &mmr->sig->err_item, 1790 sizeof(mr_status->sig_err)); 1791 else { 1792 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; 1793 mr_status->sig_err.sig_err_offset = 0; 1794 mr_status->sig_err.key = mmr->sig->err_item.key; 1795 } 1796 1797 mmr->sig->sig_err_exists = false; 1798 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; 1799 } 1800 1801 done: 1802 return ret; 1803 } 1804 1805 static int 1806 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, 1807 struct scatterlist *sgl, 1808 unsigned short sg_nents, 1809 unsigned int *sg_offset_p) 1810 { 1811 struct scatterlist *sg = sgl; 1812 struct mlx5_klm *klms = mr->descs; 1813 unsigned int sg_offset = sg_offset_p ? 
*sg_offset_p : 0; 1814 u32 lkey = mr->ibmr.pd->local_dma_lkey; 1815 int i; 1816 1817 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; 1818 mr->ibmr.length = 0; 1819 mr->ndescs = sg_nents; 1820 1821 for_each_sg(sgl, sg, sg_nents, i) { 1822 if (unlikely(i >= mr->max_descs)) 1823 break; 1824 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); 1825 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); 1826 klms[i].key = cpu_to_be32(lkey); 1827 mr->ibmr.length += sg_dma_len(sg) - sg_offset; 1828 1829 sg_offset = 0; 1830 } 1831 1832 if (sg_offset_p) 1833 *sg_offset_p = sg_offset; 1834 1835 return i; 1836 } 1837 1838 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) 1839 { 1840 struct mlx5_ib_mr *mr = to_mmr(ibmr); 1841 __be64 *descs; 1842 1843 if (unlikely(mr->ndescs == mr->max_descs)) 1844 return -ENOMEM; 1845 1846 descs = mr->descs; 1847 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); 1848 1849 return 0; 1850 } 1851 1852 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, 1853 unsigned int *sg_offset) 1854 { 1855 struct mlx5_ib_mr *mr = to_mmr(ibmr); 1856 int n; 1857 1858 mr->ndescs = 0; 1859 1860 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, 1861 mr->desc_size * mr->max_descs, 1862 DMA_TO_DEVICE); 1863 1864 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) 1865 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset); 1866 else 1867 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 1868 mlx5_set_page); 1869 1870 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, 1871 mr->desc_size * mr->max_descs, 1872 DMA_TO_DEVICE); 1873 1874 return n; 1875 } 1876