/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem_odp.h>
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"

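/*
 * Cap on the number of asynchronous mkey creations that may be in flight
 * per cache entry at any one time; push_mkey_locked() returns -EAGAIN once
 * this many reserved (not yet completed) mkeys are outstanding.
 */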
enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate);

static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if (acc & IB_ACCESS_RELAXED_ORDERING) {
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
			MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);

		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
		    (MLX5_CAP_GEN(dev->mdev,
				  relaxed_ordering_read_pci_enabled) &&
		     pcie_relaxed_ordering_enabled(dev->mdev->pdev)))
			MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
	}

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}

static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	*mkey = key;
}

static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
			       struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
	int ret;

	assign_mkey_variant(dev, &mkey->key, in);
	ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
	if (!ret)
		init_waitqueue_head(&mkey->wait);

	return ret;
}

static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
{
	struct mlx5_ib_dev *dev = async_create->ent->dev;
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);

	MLX5_SET(create_mkey_in, async_create->in, opcode,
		 MLX5_CMD_OP_CREATE_MKEY);
	assign_mkey_variant(dev, &async_create->mkey, async_create->in);
	return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
				async_create->out, outlen, create_mkey_callback,
				&async_create->cb_work);
}

static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

	return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}

static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
{
	if (status == -ENXIO) /* core driver is not available */
		return;

	mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
	if (status != -EREMOTEIO) /* driver specific failure */
		return;

	/* Failed in FW, print cmd out failure details */
	mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}

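/*
 * Layout of ent->mkeys: indices [0, ent->stored) hold ready mkeys (encoded
 * with xa_mk_value()), while indices [ent->stored, ent->reserved) hold
 * XA_ZERO_ENTRY placeholders for asynchronous creations that have been
 * issued but have not completed yet.
 */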
static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings,
			    void *to_store)
{
	XA_STATE(xas, &ent->mkeys, 0);
	void *curr;

	if (limit_pendings &&
	    (ent->reserved - ent->stored) > MAX_PENDING_REG_MR)
		return -EAGAIN;

	while (1) {
		/*
		 * This is cmpxchg(NULL, XA_ZERO_ENTRY); however, this version
		 * doesn't transparently unlock. Instead we set the xas index
		 * to the current value of reserved on every iteration.
		 */
		xas_set(&xas, ent->reserved);
		curr = xas_load(&xas);
		if (!curr) {
			if (to_store && ent->stored == ent->reserved)
				xas_store(&xas, to_store);
			else
				xas_store(&xas, XA_ZERO_ENTRY);
			if (xas_valid(&xas)) {
				ent->reserved++;
				if (to_store) {
					if (ent->stored != ent->reserved)
						__xa_store(&ent->mkeys,
							   ent->stored,
							   to_store,
							   GFP_KERNEL);
					ent->stored++;
					queue_adjust_cache_locked(ent);
					WRITE_ONCE(ent->dev->cache.last_add,
						   jiffies);
				}
			}
		}
		xa_unlock_irq(&ent->mkeys);

		/*
		 * Notice xas_nomem() must always be called as it cleans
		 * up any cached allocation.
		 */
		if (!xas_nomem(&xas, GFP_KERNEL))
			break;
		xa_lock_irq(&ent->mkeys);
	}
	xa_lock_irq(&ent->mkeys);
	if (xas_error(&xas))
		return xas_error(&xas);
	if (WARN_ON(curr))
		return -EINVAL;
	return 0;
}

static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
		     void *to_store)
{
	int ret;

	xa_lock_irq(&ent->mkeys);
	ret = push_mkey_locked(ent, limit_pendings, to_store);
	xa_unlock_irq(&ent->mkeys);
	return ret;
}

static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
{
	void *old;

	ent->reserved--;
	old = __xa_erase(&ent->mkeys, ent->reserved);
	WARN_ON(old);
}

static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
{
	void *old;

	old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
	WARN_ON(old);
	ent->stored++;
}

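/*
 * Pop the most recently stored mkey. If reserved placeholders remain above
 * it, the popped slot is refilled with XA_ZERO_ENTRY and the topmost
 * placeholder is erased instead, so that the placeholders stay contiguous
 * at [stored, reserved).
 */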
static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
{
	void *old, *xa_mkey;

	ent->stored--;
	ent->reserved--;

	if (ent->stored == ent->reserved) {
		xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
		WARN_ON(!xa_mkey);
		return (u32)xa_to_value(xa_mkey);
	}

	xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
			     GFP_KERNEL);
	WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
	old = __xa_erase(&ent->mkeys, ent->reserved);
	WARN_ON(old);
	return (u32)xa_to_value(xa_mkey);
}

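/*
 * Completion handler for asynchronous mkey creation: on failure the
 * reservation is undone and refilling is throttled via fill_delay; on
 * success the new mkey is pushed into its cache entry.
 */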
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5r_async_create_mkey *mkey_out =
		container_of(context, struct mlx5r_async_create_mkey, cb_work);
	struct mlx5_cache_ent *ent = mkey_out->ent;
	struct mlx5_ib_dev *dev = ent->dev;
	unsigned long flags;

	if (status) {
		create_mkey_warn(dev, status, mkey_out->out);
		kfree(mkey_out);
		xa_lock_irqsave(&ent->mkeys, flags);
		undo_push_reserve_mkey(ent);
		WRITE_ONCE(dev->fill_delay, 1);
		xa_unlock_irqrestore(&ent->mkeys, flags);
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mkey_out->mkey |= mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
	WRITE_ONCE(dev->cache.last_add, jiffies);

	xa_lock_irqsave(&ent->mkeys, flags);
	push_to_reserved(ent, mkey_out->mkey);
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	xa_unlock_irqrestore(&ent->mkeys, flags);
	kfree(mkey_out);
}

static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
{
	int ret = 0;

	switch (access_mode) {
	case MLX5_MKC_ACCESS_MODE_MTT:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
						   sizeof(struct mlx5_mtt));
		break;
	case MLX5_MKC_ACCESS_MODE_KSM:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
						   sizeof(struct mlx5_klm));
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}

static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
	set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
				      ent->dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (ent->rb_key.access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);

	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_mkc_octo_size(ent->rb_key.access_mode,
				   ent->rb_key.ndescs));
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
}

/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
	struct mlx5r_async_create_mkey *async_create;
	void *mkc;
	int err = 0;
	int i;

	for (i = 0; i < num; i++) {
		async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
				       GFP_KERNEL);
		if (!async_create)
			return -ENOMEM;
		mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
				   memory_key_mkey_entry);
		set_cache_mkc(ent, mkc);
		async_create->ent = ent;

		err = push_mkey(ent, true, NULL);
		if (err)
			goto free_async_create;

		err = mlx5_ib_create_mkey_cb(async_create);
		if (err) {
			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
			goto err_undo_reserve;
		}
	}

	return 0;

err_undo_reserve:
	xa_lock_irq(&ent->mkeys);
	undo_push_reserve_mkey(ent);
	xa_unlock_irq(&ent->mkeys);
free_async_create:
	kfree(async_create);
	return err;
}

/* Synchronously create an MR in the cache */
static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_cache_mkc(ent, mkc);

	err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
	if (err)
		goto free_in;

	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
free_in:
	kfree(in);
	return err;
}

static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
	u32 mkey;

	lockdep_assert_held(&ent->mkeys.xa_lock);
	if (!ent->stored)
		return;
	mkey = pop_stored_mkey(ent);
	xa_unlock_irq(&ent->mkeys);
	mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
	xa_lock_irq(&ent->mkeys);
}

static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
	__acquires(&ent->mkeys) __releases(&ent->mkeys)
{
	int err;

	lockdep_assert_held(&ent->mkeys.xa_lock);

	while (true) {
		if (limit_fill)
			target = ent->limit * 2;
		if (target == ent->reserved)
			return 0;
		if (target > ent->reserved) {
			u32 todo = target - ent->reserved;

			xa_unlock_irq(&ent->mkeys);
			err = add_keys(ent, todo);
			if (err == -EAGAIN)
				usleep_range(3000, 5000);
			xa_lock_irq(&ent->mkeys);
			if (err) {
				if (err != -EAGAIN)
					return err;
			} else
				return 0;
		} else {
			remove_cache_mr_locked(ent);
		}
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 target;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &target);
	if (err)
		return err;

	/*
	 * Target is the new value of total_mrs the user requests; however,
	 * we cannot free MRs that are in use. Compute the target value for
	 * stored mkeys.
	 */
	xa_lock_irq(&ent->mkeys);
	if (target < ent->in_use) {
		err = -EINVAL;
		goto err_unlock;
	}
	target = target - ent->in_use;
	if (target < ent->limit || target > ent->limit*2) {
		err = -EINVAL;
		goto err_unlock;
	}
	err = resize_available_mrs(ent, target, false);
	if (err)
		goto err_unlock;
	xa_unlock_irq(&ent->mkeys);

	return count;

err_unlock:
	xa_unlock_irq(&ent->mkeys);
	return err;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 var;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/*
	 * Upon set we immediately fill the cache to the high water mark
	 * implied by the limit.
	 */
	xa_lock_irq(&ent->mkeys);
	ent->limit = var;
	err = resize_available_mrs(ent, 0, true);
	xa_unlock_irq(&ent->mkeys);
	if (err)
		return err;
	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static bool someone_adding(struct mlx5_mkey_cache *cache)
{
	struct mlx5_cache_ent *ent;
	struct rb_node *node;
	bool ret;

	mutex_lock(&cache->rb_lock);
	for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
		ret = ent->stored < ent->limit;
		xa_unlock_irq(&ent->mkeys);
		if (ret) {
			mutex_unlock(&cache->rb_lock);
			return true;
		}
	}
	mutex_unlock(&cache->rb_lock);
	return false;
}

/*
 * Check if the bucket is outside the high/low water mark and schedule an
 * async update. The cache refill has hysteresis: once the low water mark is
 * hit it is refilled up to the high mark.
 */
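/*
 * For example, with ent->limit = 8: refilling starts once stored drops
 * below 8 and continues until reserved reaches 16 (2 * limit); shrink work
 * is queued once stored exceeds 16.
 */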
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
	lockdep_assert_held(&ent->mkeys.xa_lock);

	if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp)
		return;
	if (ent->stored < ent->limit) {
		ent->fill_to_high_water = true;
		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
	} else if (ent->fill_to_high_water &&
		   ent->reserved < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
	} else if (ent->stored == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->stored > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->stored != ent->reserved)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
	}
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mkey_cache *cache = &dev->cache;
	int err;

	xa_lock_irq(&ent->mkeys);
	if (ent->disabled)
		goto out;

	if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		xa_unlock_irq(&ent->mkeys);
		err = add_keys(ent, 1);
		xa_lock_irq(&ent->mkeys);
		if (ent->disabled)
			goto out;
		if (err) {
			/*
			 * EAGAIN only happens if there are pending MRs, so we
			 * will be rescheduled when storing them. The only
			 * failure path here is ENOMEM.
			 */
			if (err != -EAGAIN) {
				mlx5_ib_warn(
					dev,
					"add keys command failed, err %d\n",
					err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			}
		}
	} else if (ent->stored > 2 * ent->limit) {
		bool need_delay;

		/*
		 * The remove_cache_mr_locked() logic is performed as a
		 * garbage collection task. Such a task is intended to be
		 * run when no other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_cache_mr_locked()
		 * and postpone the garbage collection work to try to run in
		 * the next cycle, in order to free CPU resources to other
		 * tasks.
		 */
		xa_unlock_irq(&ent->mkeys);
		need_delay = need_resched() || someone_adding(cache) ||
			     !time_after(jiffies,
					 READ_ONCE(cache->last_add) + 300 * HZ);
		xa_lock_irq(&ent->mkeys);
		if (ent->disabled)
			goto out;
		if (need_delay) {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
			goto out;
		}
		remove_cache_mr_locked(ent);
		queue_adjust_cache_locked(ent);
	}
out:
	xa_unlock_irq(&ent->mkeys);
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
			     struct mlx5r_cache_rb_key key2)
{
	int res;

	res = key1.ats - key2.ats;
	if (res)
		return res;

	res = key1.access_mode - key2.access_mode;
	if (res)
		return res;

	res = key1.access_flags - key2.access_flags;
	if (res)
		return res;

	/*
	 * Keep ndescs last in the compare order, since the find function
	 * requires an exact match on all other properties and only the
	 * closest match on size.
	 */
	return key1.ndescs - key2.ndescs;
}

static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
				 struct mlx5_cache_ent *ent)
{
	struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL;
	struct mlx5_cache_ent *cur;
	int cmp;

	/* Figure out where to put new node */
	while (*new) {
		cur = rb_entry(*new, struct mlx5_cache_ent, node);
		parent = *new;
		cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key);
		if (cmp > 0)
			new = &((*new)->rb_left);
		if (cmp < 0)
			new = &((*new)->rb_right);
		if (cmp == 0)
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&ent->node, parent, new);
	rb_insert_color(&ent->node, &cache->rb_root);

	return 0;
}

static struct mlx5_cache_ent *
mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
			   struct mlx5r_cache_rb_key rb_key)
{
	struct rb_node *node = dev->cache.rb_root.rb_node;
	struct mlx5_cache_ent *cur, *smallest = NULL;
	int cmp;

	/*
	 * Find the smallest ent whose key compares >= the requested key.
	 * Since ndescs is the last compare field, this is an exact match on
	 * all other properties and the closest (>=) match on size.
	 */
	while (node) {
		cur = rb_entry(node, struct mlx5_cache_ent, node);
		cmp = cache_ent_key_cmp(cur->rb_key, rb_key);
		if (cmp > 0) {
			smallest = cur;
			node = node->rb_left;
		}
		if (cmp < 0)
			node = node->rb_right;
		if (cmp == 0)
			return cur;
	}

	return (smallest &&
		smallest->rb_key.access_mode == rb_key.access_mode &&
		smallest->rb_key.access_flags == rb_key.access_flags &&
		smallest->rb_key.ats == rb_key.ats) ?
		       smallest :
		       NULL;
}

static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
					       struct mlx5_cache_ent *ent,
					       int access_flags)
{
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	xa_lock_irq(&ent->mkeys);
	ent->in_use++;

	if (!ent->stored) {
		queue_adjust_cache_locked(ent);
		ent->miss++;
		xa_unlock_irq(&ent->mkeys);
		err = create_cache_mkey(ent, &mr->mmkey.key);
		if (err) {
			xa_lock_irq(&ent->mkeys);
			ent->in_use--;
			xa_unlock_irq(&ent->mkeys);
			kfree(mr);
			return ERR_PTR(err);
		}
	} else {
		mr->mmkey.key = pop_stored_mkey(ent);
		queue_adjust_cache_locked(ent);
		xa_unlock_irq(&ent->mkeys);
	}
	mr->mmkey.cache_ent = ent;
	mr->mmkey.type = MLX5_MKEY_MR;
	init_waitqueue_head(&mr->mmkey.wait);
	return mr;
}

static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
					 int access_flags)
{
	int ret = 0;

	if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		ret |= IB_ACCESS_REMOTE_ATOMIC;

	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		ret |= IB_ACCESS_RELAXED_ORDERING;

	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		ret |= IB_ACCESS_RELAXED_ORDERING;

	return ret;
}

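/*
 * Allocate an MR backed by a cache entry matching the requested shape.
 * Only access flags that cannot later be changed via UMR form part of the
 * lookup key; returns -EOPNOTSUPP when no matching entry exists.
 */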
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       int access_flags, int access_mode,
				       int ndescs)
{
	struct mlx5r_cache_rb_key rb_key = {
		.ndescs = ndescs,
		.access_mode = access_mode,
		.access_flags = get_unchangeable_access_flags(dev, access_flags)
	};
	struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key);

	if (!ent)
		return ERR_PTR(-EOPNOTSUPP);

	return _mlx5_mr_cache_alloc(dev, ent, access_flags);
}

static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
{
	u32 mkey;

	cancel_delayed_work(&ent->dwork);
	xa_lock_irq(&ent->mkeys);
	while (ent->stored) {
		mkey = pop_stored_mkey(ent);
		xa_unlock_irq(&ent->mkeys);
		mlx5_core_destroy_mkey(dev->mdev, mkey);
		xa_lock_irq(&ent->mkeys);
	}
	xa_unlock_irq(&ent->mkeys);
}

static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.fs_root);
	dev->cache.fs_root = NULL;
}

static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev,
					    struct mlx5_cache_ent *ent)
{
	int order = order_base_2(ent->rb_key.ndescs);
	struct dentry *dir;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
		order = MLX5_IMR_KSM_CACHE_ENTRY + 2;

	sprintf(ent->name, "%d", order);
	dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
	debugfs_create_file("size", 0600, dir, ent, &size_fops);
	debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
	debugfs_create_ulong("cur", 0400, dir, &ent->stored);
	debugfs_create_u32("miss", 0600, dir, &ent->miss);
}

static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev);
	struct mlx5_mkey_cache *cache = &dev->cache;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->fs_root = debugfs_create_dir("mr_cache", dbg_root);
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	WRITE_ONCE(dev->fill_delay, 0);
}

struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
			      struct mlx5r_cache_rb_key rb_key,
			      bool persistent_entry)
{
	struct mlx5_cache_ent *ent;
	int order;
	int ret;

	ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
	ent->rb_key = rb_key;
	ent->dev = dev;
	ent->is_tmp = !persistent_entry;

	INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

	ret = mlx5_cache_ent_insert(&dev->cache, ent);
	if (ret) {
		kfree(ent);
		return ERR_PTR(ret);
	}

	if (persistent_entry) {
		if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
			order = MLX5_IMR_KSM_CACHE_ENTRY;
		else
			order = order_base_2(rb_key.ndescs) - 2;

		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
		    mlx5r_umr_can_load_pas(dev, 0))
			ent->limit = dev->mdev->profile.mr_cache[order].limit;
		else
			ent->limit = 0;

		mlx5_mkey_cache_debugfs_add_ent(dev, ent);
	} else {
		mod_delayed_work(ent->dev->cache.wq,
				 &ent->dev->cache.remove_ent_dwork,
				 msecs_to_jiffies(30 * 1000));
	}

	return ent;
}

static void remove_ent_work_func(struct work_struct *work)
{
	struct mlx5_mkey_cache *cache;
	struct mlx5_cache_ent *ent;
	struct rb_node *cur;

	cache = container_of(work, struct mlx5_mkey_cache,
			     remove_ent_dwork.work);
	mutex_lock(&cache->rb_lock);
	cur = rb_last(&cache->rb_root);
	while (cur) {
		ent = rb_entry(cur, struct mlx5_cache_ent, node);
		cur = rb_prev(cur);
		mutex_unlock(&cache->rb_lock);

		xa_lock_irq(&ent->mkeys);
		if (!ent->is_tmp) {
			xa_unlock_irq(&ent->mkeys);
			mutex_lock(&cache->rb_lock);
			continue;
		}
		xa_unlock_irq(&ent->mkeys);

		clean_keys(ent->dev, ent);
		mutex_lock(&cache->rb_lock);
	}
	mutex_unlock(&cache->rb_lock);
}

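/*
 * Create the persistent cache entries: one MTT entry per supported order,
 * covering ndescs from 1 << 2 up to 1 << (max_order + 2), plus any
 * ODP-specific entry added by mlx5_odp_init_mkey_cache().
 */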
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mkey_cache *cache = &dev->cache;
	struct rb_root *root = &dev->cache.rb_root;
	struct mlx5r_cache_rb_key rb_key = {
		.access_mode = MLX5_MKC_ACCESS_MODE_MTT,
	};
	struct mlx5_cache_ent *ent;
	struct rb_node *node;
	int ret;
	int i;

	mutex_init(&dev->slow_path_mutex);
	mutex_init(&dev->cache.rb_lock);
	dev->cache.rb_root = RB_ROOT;
	INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	mlx5_mkey_cache_debugfs_init(dev);
	mutex_lock(&cache->rb_lock);
	for (i = 0; i <= mkey_cache_max_order(dev); i++) {
		rb_key.ndescs = 1 << (i + 2);
		ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
		if (IS_ERR(ent)) {
			ret = PTR_ERR(ent);
			goto err;
		}
	}

	ret = mlx5_odp_init_mkey_cache(dev);
	if (ret)
		goto err;

	mutex_unlock(&cache->rb_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
		queue_adjust_cache_locked(ent);
		xa_unlock_irq(&ent->mkeys);
	}

	return 0;

err:
	mutex_unlock(&cache->rb_lock);
	mlx5_mkey_cache_debugfs_cleanup(dev);
	mlx5_ib_warn(dev, "failed to create mkey cache entry\n");
	return ret;
}

void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{
	struct rb_root *root = &dev->cache.rb_root;
	struct mlx5_cache_ent *ent;
	struct rb_node *node;

	if (!dev->cache.wq)
		return;

	mutex_lock(&dev->cache.rb_lock);
	cancel_delayed_work(&dev->cache.remove_ent_dwork);
	for (node = rb_first(root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
		ent->disabled = true;
		xa_unlock_irq(&ent->mkeys);
		cancel_delayed_work(&ent->dwork);
	}
	mutex_unlock(&dev->cache.rb_lock);

	/*
	 * After all entries are disabled and will not reschedule on WQ,
	 * flush it and all async commands.
	 */
	flush_workqueue(dev->cache.wq);

	mlx5_mkey_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	/* At this point all entries are disabled and have no concurrent work. */
	mutex_lock(&dev->cache.rb_lock);
	node = rb_first(root);
	while (node) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		node = rb_next(node);
		clean_keys(dev, ent);
		rb_erase(&ent->node, root);
		kfree(ent);
	}
	mutex_unlock(&dev->cache.rb_lock);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
				      pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

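/*
 * A translation octoword holds MLX5_IB_UMR_OCTOWORD / sizeof(struct
 * mlx5_mtt) MTT entries (two, for 8-byte MTTs); hence the (npages + 1) / 2
 * rounding below.
 */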
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MKEY_CACHE_LAST_STD_ENTRY;
	return MLX5_MAX_UMR_SHIFT;
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  u64 length, int access_flags, u64 iova)
{
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->ibmr.device = &dev->ib_dev;
	mr->ibmr.iova = iova;
	mr->access_flags = access_flags;
}

static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
						  u64 iova)
{
	/*
	 * The alignment of iova has already been checked upon entering
	 * UVERBS_METHOD_REG_DMABUF_MR
	 */
	umem->iova = iova;
	return PAGE_SIZE;
}

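/*
 * Build the cache key from the umem layout and allocate the MR from a
 * matching cache entry; when no entry fits, fall back to the slow path and
 * create an uncached mkey via reg_create().
 */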
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
					     struct ib_umem *umem, u64 iova,
					     int access_flags)
{
	struct mlx5r_cache_rb_key rb_key = {
		.access_mode = MLX5_MKC_ACCESS_MODE_MTT,
	};
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	unsigned int page_size;

	if (umem->is_dmabuf)
		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
	else
		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
						     0, iova);
	if (WARN_ON(!page_size))
		return ERR_PTR(-EINVAL);

	rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
	rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
	rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
	ent = mkey_cache_ent_from_rb_key(dev, rb_key);
	/*
	 * If the MR can't come from the cache then synchronously create an
	 * uncached one.
	 */
	if (!ent) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, false);
		mutex_unlock(&dev->slow_path_mutex);
		if (IS_ERR(mr))
			return mr;
		mr->mmkey.rb_key = rb_key;
		return mr;
	}

	mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
	if (IS_ERR(mr))
		return mr;

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->page_shift = order_base_2(page_size);
	set_mr_fields(dev, mr, umem->length, access_flags, iova);

	return mr;
}

/*
 * Synchronously create an MR without using the mkey cache. If populate is
 * false the mkey is created in the free state and must be enabled (e.g.
 * via UMR) before use.
 */
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	if (!page_size)
		return ERR_PTR(-EINVAL);
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;
	mr->page_shift = order_base_2(page_size);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) *
			 roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate) {
		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
			err = -EINVAL;
			goto err_2;
		}
		mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
	}

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
				      populate ? pd : dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, len, umem->length);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(iova, umem->length, mr->page_shift));
	MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
	if (mlx5_umem_needs_ats(dev, umem, access_flags))
		MLX5_SET(mkc, mkc, ma_translation_mode, 1);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(iova, umem->length, mr->page_shift));
	}

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
	mr->umem = umem;
	set_mr_fields(dev, mr, umem->length, access_flags, iova);
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);
err_1:
	kfree(mr);
	return ERR_PTR(err);
}

static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET64(mkc, mkc, len, length);
	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	set_mr_fields(dev, mr, length, acc, start_addr);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}

static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
				    u64 iova, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool xlt_with_umr;
	int err;

	xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
	if (xlt_with_umr) {
		mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
	} else {
		unsigned int page_size = mlx5_umem_find_best_pgsz(
			umem, mkc, log_page_size, 0, iova);

		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, true);
		mutex_unlock(&dev->slow_path_mutex);
	}
	if (IS_ERR(mr)) {
		ib_umem_release(umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

	if (xlt_with_umr) {
		/*
		 * If the MR was created with reg_create then it will be
		 * configured properly but left disabled. It is safe to go
		 * ahead and configure it again via UMR while enabling it.
		 */
		err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
		if (err) {
			mlx5_ib_dereg_mr(&mr->ibmr, NULL);
			return ERR_PTR(err);
		}
	}
	return &mr->ibmr;
}

static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 iova, int access_flags,
					struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
	if (err)
		return ERR_PTR(err);
	if (!start && length == U64_MAX) {
		if (iova != 0)
			return ERR_PTR(-EINVAL);
		if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	/* ODP requires xlt update via umr to work. */
	if (!mlx5r_umr_can_load_pas(dev, length))
		return ERR_PTR(-EINVAL);

	odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
			      &mlx5_mn_ops);
	if (IS_ERR(odp))
		return ERR_CAST(odp);

	mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&odp->umem);
		return ERR_CAST(mr);
	}
	xa_init(&mr->implicit_children);

	odp->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_odp_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, iova, length, access_flags);

	if (access_flags & IB_ACCESS_ON_DEMAND)
		return create_user_odp_mr(pd, start, length, iova, access_flags,
					  udata);
	umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
	if (IS_ERR(umem))
		return ERR_CAST(umem);
	return create_real_mr(pd, umem, iova, access_flags);
}

static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
	struct mlx5_ib_mr *mr = umem_dmabuf->private;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
	.allow_peer2peer = 1,
	.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};

struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
	    !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev,
		    "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
		    offset, virt_addr, length, fd, access_flags);

	/* dmabuf requires xlt update via umr to work. */
	if (!mlx5r_umr_can_load_pas(dev, length))
		return ERR_PTR(-EINVAL);

	umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
					 access_flags,
					 &mlx5_ib_dmabuf_attach_ops);
	if (IS_ERR(umem_dmabuf)) {
		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
			    PTR_ERR(umem_dmabuf));
		return ERR_CAST(umem_dmabuf);
	}

	mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
				access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&umem_dmabuf->umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
	umem_dmabuf->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_dmabuf_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}

/*
 * True if the change in access flags can be done via UMR; only some access
 * flags can be updated.
 */
static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
				     unsigned int current_access_flags,
				     unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
		      IB_ACCESS_REMOTE_ATOMIC))
		return false;
	return mlx5r_umr_can_reconfig(dev, current_access_flags,
				      target_access_flags);
}

static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
				  struct ib_umem *new_umem,
				  int new_access_flags, u64 iova,
				  unsigned long *page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);

	/* We only track the allocated sizes of MRs from the cache */
	if (!mr->mmkey.cache_ent)
		return false;
	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
		return false;

	*page_size =
		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
	if (WARN_ON(!*page_size))
		return false;
	return (mr->mmkey.cache_ent->rb_key.ndescs) >=
	       ib_umem_num_dma_blocks(new_umem, *page_size);
}

static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			 int access_flags, int flags, struct ib_umem *new_umem,
			 u64 iova, unsigned long page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
	struct ib_umem *old_umem = mr->umem;
	int err;

	/*
	 * To keep everything simple the MR is revoked before we start to mess
	 * with it. This ensures the change is atomic relative to any use of
	 * the MR.
	 */
	err = mlx5r_umr_revoke_mr(mr);
	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		mr->ibmr.pd = pd;
		upd_flags |= MLX5_IB_UPD_XLT_PD;
	}
	if (flags & IB_MR_REREG_ACCESS) {
		mr->access_flags = access_flags;
		upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
	}

	mr->ibmr.iova = iova;
	mr->ibmr.length = new_umem->length;
	mr->page_shift = order_base_2(page_size);
	mr->umem = new_umem;
	err = mlx5r_umr_update_mr_pas(mr, upd_flags);
	if (err) {
		/*
		 * The MR is revoked at this point so there is no issue in
		 * freeing new_umem.
		 */
		mr->umem = old_umem;
		return err;
	}

	atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
	ib_umem_release(old_umem);
	atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
	return 0;
}

struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 iova, int new_access_flags,
				    struct ib_pd *new_pd,
				    struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(
		dev,
		"start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		start, iova, length, new_access_flags);

	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
		return ERR_PTR(-EOPNOTSUPP);

	if (!(flags & IB_MR_REREG_ACCESS))
		new_access_flags = mr->access_flags;
	if (!(flags & IB_MR_REREG_PD))
		new_pd = ib_mr->pd;

	if (!(flags & IB_MR_REREG_TRANS)) {
		struct ib_umem *umem;

		/* Fast path for PD/access change */
		if (can_use_umr_rereg_access(dev, mr->access_flags,
					     new_access_flags)) {
			err = mlx5r_umr_rereg_pd_access(mr, new_pd,
							new_access_flags);
			if (err)
				return ERR_PTR(err);
			return NULL;
		}
		/* DM or ODP MRs don't have a normal umem so we can't re-use it */
		if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
			goto recreate;

		/*
		 * Only one active MR can refer to a umem at one time, revoke
		 * the old MR before assigning the umem to the new one.
		 */
		err = mlx5r_umr_revoke_mr(mr);
		if (err)
			return ERR_PTR(err);
		umem = mr->umem;
		mr->umem = NULL;
		atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

		return create_real_mr(new_pd, umem, mr->ibmr.iova,
				      new_access_flags);
	}

	/*
	 * DM doesn't have a PAS list so we can't re-use it; ODP/dmabuf do,
	 * but the logic around releasing the umem is different.
	 */
	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
		goto recreate;

	if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
	    can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
		struct ib_umem *new_umem;
		unsigned long page_size;

		new_umem = ib_umem_get(&dev->ib_dev, start, length,
				       new_access_flags);
		if (IS_ERR(new_umem))
			return ERR_CAST(new_umem);

		/* Fast path for PAS change */
		if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
					  &page_size)) {
			err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
					    new_umem, iova, page_size);
			if (err) {
				ib_umem_release(new_umem);
				return ERR_PTR(err);
			}
			return NULL;
		}
		return create_real_mr(new_pd, new_umem, iova, new_access_flags);
	}

	/*
	 * Everything else has no state we can preserve; just create a new MR
	 * from scratch.
	 */
recreate:
	return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
				   new_access_flags, udata);
}

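/*
 * Descriptor buffers must be MLX5_UMR_ALIGN (2048 byte) aligned, which
 * kzalloc() does not guarantee, so over-allocate and align the result with
 * PTR_ALIGN() before DMA mapping it.
 */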
1767 static int
mlx5_alloc_priv_descs(struct ib_device * device,struct mlx5_ib_mr * mr,int ndescs,int desc_size)1768 mlx5_alloc_priv_descs(struct ib_device *device,
1769 struct mlx5_ib_mr *mr,
1770 int ndescs,
1771 int desc_size)
1772 {
1773 struct mlx5_ib_dev *dev = to_mdev(device);
1774 struct device *ddev = &dev->mdev->pdev->dev;
1775 int size = ndescs * desc_size;
1776 int add_size;
1777 int ret;
1778
1779 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1780 if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) {
1781 int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size));
1782
1783 add_size = min_t(int, end - size, add_size);
1784 }
1785
1786 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1787 if (!mr->descs_alloc)
1788 return -ENOMEM;
1789
1790 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1791
1792 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
1793 if (dma_mapping_error(ddev, mr->desc_map)) {
1794 ret = -ENOMEM;
1795 goto err;
1796 }
1797
1798 return 0;
1799 err:
1800 kfree(mr->descs_alloc);
1801
1802 return ret;
1803 }
1804
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (!mr->umem && mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;
		struct mlx5_ib_dev *dev = to_mdev(device);

		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
				 DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

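/*
 * Return an mkey to the cache on deregistration. If the MR came from a
 * cache entry, push it back there; otherwise look up (or create) an
 * entry matching its rb_key so the mkey can be reused later.
 */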
static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_mr *mr)
{
	struct mlx5_mkey_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int ret;

	if (mr->mmkey.cache_ent) {
		xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
		mr->mmkey.cache_ent->in_use--;
		goto end;
	}

	mutex_lock(&cache->rb_lock);
	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
	if (ent) {
		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
			if (ent->disabled) {
				mutex_unlock(&cache->rb_lock);
				return -EOPNOTSUPP;
			}
			mr->mmkey.cache_ent = ent;
			xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
			mutex_unlock(&cache->rb_lock);
			goto end;
		}
	}

	ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
	mutex_unlock(&cache->rb_lock);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	mr->mmkey.cache_ent = ent;
	xa_lock_irq(&mr->mmkey.cache_ent->mkeys);

end:
	ret = push_mkey_locked(mr->mmkey.cache_ent, false,
			       xa_mk_value(mr->mmkey.key));
	xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
	return ret;
}

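/*
 * Tear down an MR: wait out any ODP users, destroy the integrity state
 * of IB_MR_TYPE_INTEGRITY MRs, revoke the mkey via UMR and return it to
 * the cache when possible (otherwise destroy it), then release the umem
 * and free the private descriptors.
 */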
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	int rc;

	/*
	 * Any async use of the mr must hold the refcount. Once the refcount
	 * goes to zero no other thread (ODP page faults, prefetch, any UMR
	 * activity, etc.) can touch the mkey, so it is safe to destroy it.
	 */
	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    refcount_read(&mr->mmkey.usecount) != 0 &&
	    xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
		mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			   mr->sig, NULL, GFP_KERNEL);

		if (mr->mtt_mr) {
			rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->mtt_mr = NULL;
		}
		if (mr->klm_mr) {
			rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->klm_mr = NULL;
		}

		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	/* Stop DMA */
	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
		if (mlx5r_umr_revoke_mr(mr) ||
		    cache_ent_find_and_store(dev, mr))
			mr->mmkey.cache_ent = NULL;

	if (!mr->mmkey.cache_ent) {
		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
		if (rc)
			return rc;
	}

	if (mr->umem) {
		bool is_odp = is_odp_mr(mr);

		if (!is_odp)
			atomic_sub(ib_umem_num_pages(mr->umem),
				   &dev->mdev->priv.reg_pages);
		ib_umem_release(mr->umem);
		if (is_odp)
			mlx5_ib_free_odp_mr(mr);
	}

	if (!mr->mmkey.cache_ent)
		mlx5_free_priv_descs(mr);

	kfree(mr);
	return 0;
}

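/*
 * Fill the mkey context for a kernel MR that starts out in the "free"
 * state; umr_en is set so its translation table can later be populated
 * through UMR WQEs.
 */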
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	/* This is only used from the kernel, so setting the PD is OK. */
	set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}

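/*
 * Common helper for the alloc_mr paths: allocate the DMA-mapped
 * descriptor array, build the free mkey context and create the mkey.
 * On success the MR's lkey and rkey are both set to the new mkey.
 */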
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}

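/*
 * Allocate an internal MR used to map the data and protection
 * information scatterlists of an integrity MR. The descriptor count
 * covers both lists; MTT mode uses the system page size while KLM mode
 * is page-size agnostic (page_shift stays 0).
 */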
static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}

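/*
 * Build the state behind an IB_MR_TYPE_INTEGRITY MR: a pair of PSVs
 * (memory and wire domains), two internal PI MRs (MTT for the common
 * case, KLM as fallback) and a KLM mkey with BSF enabled that stitches
 * them together. The MR is also tracked in dev->sig_mrs.
 */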
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			      mr->sig, GFP_KERNEL));
	if (err)
		goto err_free_descs;
	return 0;

err_free_descs:
	destroy_mkey(dev, mr);
	mlx5_free_priv_descs(mr);
err_free_mtt_mr:
	mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);

	return err;
}

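/*
 * Common implementation behind alloc_mr and alloc_mr_integrity:
 * dispatch on the MR type to set up the matching descriptor format
 * (MTT for MEM_REG, KLM for SG_GAPS, KLM plus PSVs for INTEGRITY).
 */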
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}

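/*
 * Create a memory window: validate the user's request, create a
 * KLM-mode mkey on the MW's PD (type 2 windows additionally get
 * en_rinval set) and register the mkey for ODP lookup when enabled.
 */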
int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = to_mmw(ibmw);
	unsigned int ndescs;
	u32 *in = NULL;
	void *mkc;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32 comp_mask;
		__u32 response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return -EOPNOTSUPP;

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	ibmw->rkey = mw->mmkey.key;
	mw->mmkey.ndescs = ndescs;

	resp.response_length =
		min(offsetofend(typeof(resp), response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto free_mkey;
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
		if (err)
			goto free_mkey;
	}

	kfree(in);
	return 0;

free_mkey:
	mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
	kfree(in);
	return err;
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_dev *dev = to_mdev(mw->device);
	struct mlx5_ib_mw *mmw = to_mmw(mw);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
		/*
		 * pagefault_single_data_segment() may be accessing mmw
		 * if the user bound an ODP MR to this MW.
		 */
		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);

	return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}

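/*
 * Report signature status for an MR. If a signature error was latched
 * by the HW it is copied into mr_status and the sticky error is
 * cleared, so a subsequent call reports a clean status.
 */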
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

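/*
 * Try to map data and metadata directly with the PD's local_dma_lkey.
 * This only works when each list is a single DMA-contiguous entry; the
 * caller falls back to MTT/KLM mapping when fewer entries are consumed
 * than were passed in.
 */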
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->mmkey.ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}

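/*
 * Translate data (and optionally metadata) scatterlists into KLM
 * descriptors, each carrying its own address, byte count and the PD's
 * local_dma_lkey. Returns the number of descriptors consumed, which is
 * less than the input count if mr->max_descs is exhausted.
 */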
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->mmkey.ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}

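/*
 * ib_sg_to_pages() callbacks: append one page address to the MTT
 * descriptor array, for data pages and (in the _pi variant) for
 * protection-information pages that follow the data descriptors.
 */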
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->mmkey.ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

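/*
 * Map data and metadata onto the internal MTT PI MR. Both lists are
 * laid out in one page table, so the gap between the end of the data
 * and the start of the metadata is registered as well; pi_iova records
 * where the metadata starts relative to the first data page.
 */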
static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->mmkey.ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * The PI address for the HW is the offset of the metadata
		 * address relative to the first data page address. It equals
		 * the first data page address + the size of the data pages +
		 * the metadata offset within the first metadata page.
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->mmkey.ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we also
		 * register the gaps between the end of the data and the start
		 * of the metadata (the sig MR will verify that the HW accesses
		 * the right addresses). This mapping is safe because we use an
		 * internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}

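/*
 * KLM fallback for PI mapping. Unlike the MTT path this is zero-based:
 * the data starts at iova 0 and the metadata immediately follows the
 * data (pi_iova == data_length).
 */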
static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->mmkey.ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is a zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}

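/*
 * Map the data and protection scatterlists of an integrity MR, trying
 * the cheapest scheme first: direct PA mapping with local_dma_lkey,
 * then the MTT PI MR, and finally the KLM PI MR, which can describe
 * any layout but is the most expensive for the HW to walk.
 */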
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->mmkey.ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform a UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fall back to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to
	 * MTT descriptors and fall back to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially under high load).
	 * Use KLM (indirect access) only if it's mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is a zero-based memory region */
	ibmr->iova = 0;
	mr->pi_mr = pi_mr;
	if (pi_mr)
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	else
		ibmr->sig_attrs->meta_length = mr->meta_length;

	return 0;
}

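/*
 * Standard ib_map_mr_sg() entry point for fast-registration MRs:
 * convert the scatterlist to KLMs for KLM-mode MRs (e.g.
 * IB_MR_TYPE_SG_GAPS) or to page-aligned MTT entries otherwise,
 * syncing the descriptor buffer around the CPU writes.
 */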
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->mmkey.ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
