xref: /openbmc/linux/drivers/infiniband/hw/mlx5/mr.c (revision ca48739e)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  * Copyright (c) 2020, Intel Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 
35 #include <linux/kref.h>
36 #include <linux/random.h>
37 #include <linux/debugfs.h>
38 #include <linux/export.h>
39 #include <linux/delay.h>
40 #include <linux/dma-buf.h>
41 #include <linux/dma-resv.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_umem_odp.h>
44 #include <rdma/ib_verbs.h>
45 #include "mlx5_ib.h"
46 
47 /*
48  * We can't use an array for xlt_emergency_page because dma_map_single doesn't
49  * work on kernel module memory.
50  */
51 void *xlt_emergency_page;
52 static DEFINE_MUTEX(xlt_emergency_page_mutex);
53 
54 enum {
55 	MAX_PENDING_REG_MR = 8,
56 };
57 
58 #define MLX5_UMR_ALIGN 2048
59 
60 static void
61 create_mkey_callback(int status, struct mlx5_async_work *context);
62 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
63 				     u64 iova, int access_flags,
64 				     unsigned int page_size, bool populate);
65 
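/*
 * Fill the access rights, PD and start address fields of an mkey context.
 * Relaxed ordering is only requested when the device reports support for
 * the corresponding capability.
 */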
66 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
67 					  struct ib_pd *pd)
68 {
69 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
70 
71 	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
72 	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
73 	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
74 	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
75 	MLX5_SET(mkc, mkc, lr, 1);
76 
77 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
78 		MLX5_SET(mkc, mkc, relaxed_ordering_write,
79 			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
80 	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
81 		MLX5_SET(mkc, mkc, relaxed_ordering_read,
82 			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
83 
84 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
85 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
86 	MLX5_SET64(mkc, mkc, start_addr, start_addr);
87 }
88 
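/*
 * Vary the low byte (the mkey "variant") of each newly created mkey so that
 * mkeys which reuse the same index still differ in their key value.
 */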
89 static void
90 assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
91 		    u32 *in)
92 {
93 	u8 key = atomic_inc_return(&dev->mkey_var);
94 	void *mkc;
95 
96 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
97 	MLX5_SET(mkc, mkc, mkey_7_0, key);
98 	mkey->key = key;
99 }
100 
101 static int
102 mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
103 		    u32 *in, int inlen)
104 {
105 	assign_mkey_variant(dev, mkey, in);
106 	return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
107 }
108 
109 static int
110 mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
111 		       struct mlx5_core_mkey *mkey,
112 		       struct mlx5_async_ctx *async_ctx,
113 		       u32 *in, int inlen, u32 *out, int outlen,
114 		       struct mlx5_async_work *context)
115 {
116 	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
117 	assign_mkey_variant(dev, mkey, in);
118 	return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
119 				create_mkey_callback, context);
120 }
121 
122 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
123 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
124 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
125 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
126 
127 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
128 {
129 	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
130 }
131 
132 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
133 {
134 	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
135 
136 	return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
137 }
138 
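/*
 * Completion callback for the asynchronous mkey creation started in
 * add_keys(). On success the new MR is added to its cache entry's free list;
 * on failure cache filling is throttled via dev->fill_delay and the delay
 * timer.
 */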
139 static void create_mkey_callback(int status, struct mlx5_async_work *context)
140 {
141 	struct mlx5_ib_mr *mr =
142 		container_of(context, struct mlx5_ib_mr, cb_work);
143 	struct mlx5_cache_ent *ent = mr->cache_ent;
144 	struct mlx5_ib_dev *dev = ent->dev;
145 	unsigned long flags;
146 
147 	if (status) {
148 		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
149 		kfree(mr);
150 		spin_lock_irqsave(&ent->lock, flags);
151 		ent->pending--;
152 		WRITE_ONCE(dev->fill_delay, 1);
153 		spin_unlock_irqrestore(&ent->lock, flags);
154 		mod_timer(&dev->delay_timer, jiffies + HZ);
155 		return;
156 	}
157 
158 	mr->mmkey.type = MLX5_MKEY_MR;
159 	mr->mmkey.key |= mlx5_idx_to_mkey(
160 		MLX5_GET(create_mkey_out, mr->out, mkey_index));
161 	init_waitqueue_head(&mr->mmkey.wait);
162 
163 	WRITE_ONCE(dev->cache.last_add, jiffies);
164 
165 	spin_lock_irqsave(&ent->lock, flags);
166 	list_add_tail(&mr->list, &ent->head);
167 	ent->available_mrs++;
168 	ent->total_mrs++;
169 	/* If we are doing fill_to_high_water then keep going. */
170 	queue_adjust_cache_locked(ent);
171 	ent->pending--;
172 	spin_unlock_irqrestore(&ent->lock, flags);
173 }
174 
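/*
 * Allocate an MR structure and fill the caller-provided mkey context for a
 * cacheable, UMR-enabled mkey that is created in the free state.
 */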
175 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
176 {
177 	struct mlx5_ib_mr *mr;
178 
179 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
180 	if (!mr)
181 		return NULL;
182 	mr->cache_ent = ent;
183 
184 	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
185 	MLX5_SET(mkc, mkc, free, 1);
186 	MLX5_SET(mkc, mkc, umr_en, 1);
187 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
188 	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
189 
190 	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
191 	MLX5_SET(mkc, mkc, log_page_size, ent->page);
192 	return mr;
193 }
194 
195 /* Asynchronously schedule new MRs to be populated in the cache. */
196 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
197 {
198 	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
199 	struct mlx5_ib_mr *mr;
200 	void *mkc;
201 	u32 *in;
202 	int err = 0;
203 	int i;
204 
205 	in = kzalloc(inlen, GFP_KERNEL);
206 	if (!in)
207 		return -ENOMEM;
208 
209 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
210 	for (i = 0; i < num; i++) {
211 		mr = alloc_cache_mr(ent, mkc);
212 		if (!mr) {
213 			err = -ENOMEM;
214 			break;
215 		}
216 		spin_lock_irq(&ent->lock);
217 		if (ent->pending >= MAX_PENDING_REG_MR) {
218 			err = -EAGAIN;
219 			spin_unlock_irq(&ent->lock);
220 			kfree(mr);
221 			break;
222 		}
223 		ent->pending++;
224 		spin_unlock_irq(&ent->lock);
225 		err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
226 					     &ent->dev->async_ctx, in, inlen,
227 					     mr->out, sizeof(mr->out),
228 					     &mr->cb_work);
229 		if (err) {
230 			spin_lock_irq(&ent->lock);
231 			ent->pending--;
232 			spin_unlock_irq(&ent->lock);
233 			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
234 			kfree(mr);
235 			break;
236 		}
237 	}
238 
239 	kfree(in);
240 	return err;
241 }
242 
243 /* Synchronously create an MR in the cache */
244 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
245 {
246 	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
247 	struct mlx5_ib_mr *mr;
248 	void *mkc;
249 	u32 *in;
250 	int err;
251 
252 	in = kzalloc(inlen, GFP_KERNEL);
253 	if (!in)
254 		return ERR_PTR(-ENOMEM);
255 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
256 
257 	mr = alloc_cache_mr(ent, mkc);
258 	if (!mr) {
259 		err = -ENOMEM;
260 		goto free_in;
261 	}
262 
263 	err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
264 	if (err)
265 		goto free_mr;
266 
267 	mr->mmkey.type = MLX5_MKEY_MR;
268 	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
269 	spin_lock_irq(&ent->lock);
270 	ent->total_mrs++;
271 	spin_unlock_irq(&ent->lock);
272 	kfree(in);
273 	return mr;
274 free_mr:
275 	kfree(mr);
276 free_in:
277 	kfree(in);
278 	return ERR_PTR(err);
279 }
280 
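/*
 * Destroy one MR from the entry's free list. Called with ent->lock held;
 * the lock is dropped around the destroy command and re-taken before return.
 */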
281 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
282 {
283 	struct mlx5_ib_mr *mr;
284 
285 	lockdep_assert_held(&ent->lock);
286 	if (list_empty(&ent->head))
287 		return;
288 	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
289 	list_del(&mr->list);
290 	ent->available_mrs--;
291 	ent->total_mrs--;
292 	spin_unlock_irq(&ent->lock);
293 	mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
294 	kfree(mr);
295 	spin_lock_irq(&ent->lock);
296 }
297 
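/*
 * Grow or shrink the entry's pool until available_mrs + pending reaches
 * @target (or 2 * ent->limit when @limit_fill is set). Called with ent->lock
 * held; the lock is dropped while MRs are added or destroyed.
 */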
298 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
299 				bool limit_fill)
300 {
301 	int err;
302 
303 	lockdep_assert_held(&ent->lock);
304 
305 	while (true) {
306 		if (limit_fill)
307 			target = ent->limit * 2;
308 		if (target == ent->available_mrs + ent->pending)
309 			return 0;
310 		if (target > ent->available_mrs + ent->pending) {
311 			u32 todo = target - (ent->available_mrs + ent->pending);
312 
313 			spin_unlock_irq(&ent->lock);
314 			err = add_keys(ent, todo);
315 			if (err == -EAGAIN)
316 				usleep_range(3000, 5000);
317 			spin_lock_irq(&ent->lock);
318 			if (err) {
319 				if (err != -EAGAIN)
320 					return err;
321 			} else
322 				return 0;
323 		} else {
324 			remove_cache_mr_locked(ent);
325 		}
326 	}
327 }
328 
329 static ssize_t size_write(struct file *filp, const char __user *buf,
330 			  size_t count, loff_t *pos)
331 {
332 	struct mlx5_cache_ent *ent = filp->private_data;
333 	u32 target;
334 	int err;
335 
336 	err = kstrtou32_from_user(buf, count, 0, &target);
337 	if (err)
338 		return err;
339 
340 	/*
341 	 * Target is the new value of total_mrs the user requests; however, we
342 	 * cannot free MRs that are in use. Compute the target value for
343 	 * available_mrs.
344 	 */
345 	spin_lock_irq(&ent->lock);
346 	if (target < ent->total_mrs - ent->available_mrs) {
347 		err = -EINVAL;
348 		goto err_unlock;
349 	}
350 	target = target - (ent->total_mrs - ent->available_mrs);
351 	if (target < ent->limit || target > ent->limit*2) {
352 		err = -EINVAL;
353 		goto err_unlock;
354 	}
355 	err = resize_available_mrs(ent, target, false);
356 	if (err)
357 		goto err_unlock;
358 	spin_unlock_irq(&ent->lock);
359 
360 	return count;
361 
362 err_unlock:
363 	spin_unlock_irq(&ent->lock);
364 	return err;
365 }
366 
367 static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
368 			 loff_t *pos)
369 {
370 	struct mlx5_cache_ent *ent = filp->private_data;
371 	char lbuf[20];
372 	int err;
373 
374 	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
375 	if (err < 0)
376 		return err;
377 
378 	return simple_read_from_buffer(buf, count, pos, lbuf, err);
379 }
380 
381 static const struct file_operations size_fops = {
382 	.owner	= THIS_MODULE,
383 	.open	= simple_open,
384 	.write	= size_write,
385 	.read	= size_read,
386 };
387 
388 static ssize_t limit_write(struct file *filp, const char __user *buf,
389 			   size_t count, loff_t *pos)
390 {
391 	struct mlx5_cache_ent *ent = filp->private_data;
392 	u32 var;
393 	int err;
394 
395 	err = kstrtou32_from_user(buf, count, 0, &var);
396 	if (err)
397 		return err;
398 
399 	/*
400 	 * Upon set we immediately fill the cache to the high water mark implied by
401 	 * the limit.
402 	 */
403 	spin_lock_irq(&ent->lock);
404 	ent->limit = var;
405 	err = resize_available_mrs(ent, 0, true);
406 	spin_unlock_irq(&ent->lock);
407 	if (err)
408 		return err;
409 	return count;
410 }
411 
412 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
413 			  loff_t *pos)
414 {
415 	struct mlx5_cache_ent *ent = filp->private_data;
416 	char lbuf[20];
417 	int err;
418 
419 	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
420 	if (err < 0)
421 		return err;
422 
423 	return simple_read_from_buffer(buf, count, pos, lbuf, err);
424 }
425 
426 static const struct file_operations limit_fops = {
427 	.owner	= THIS_MODULE,
428 	.open	= simple_open,
429 	.write	= limit_write,
430 	.read	= limit_read,
431 };
432 
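/* True if any cache entry currently has fewer available MRs than its limit. */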
433 static bool someone_adding(struct mlx5_mr_cache *cache)
434 {
435 	unsigned int i;
436 
437 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
438 		struct mlx5_cache_ent *ent = &cache->ent[i];
439 		bool ret;
440 
441 		spin_lock_irq(&ent->lock);
442 		ret = ent->available_mrs < ent->limit;
443 		spin_unlock_irq(&ent->lock);
444 		if (ret)
445 			return true;
446 	}
447 	return false;
448 }
449 
450 /*
451  * Check if the bucket is outside the high/low water mark and schedule an async
452  * update. The cache refill has hysteresis: once the low water mark is hit, it is
453  * refilled up to the high mark.
454  */
455 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
456 {
457 	lockdep_assert_held(&ent->lock);
458 
459 	if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
460 		return;
461 	if (ent->available_mrs < ent->limit) {
462 		ent->fill_to_high_water = true;
463 		queue_work(ent->dev->cache.wq, &ent->work);
464 	} else if (ent->fill_to_high_water &&
465 		   ent->available_mrs + ent->pending < 2 * ent->limit) {
466 		/*
467 		 * Once we start populating due to hitting a low water mark
468 		 * continue until we pass the high water mark.
469 		 */
470 		queue_work(ent->dev->cache.wq, &ent->work);
471 	} else if (ent->available_mrs == 2 * ent->limit) {
472 		ent->fill_to_high_water = false;
473 	} else if (ent->available_mrs > 2 * ent->limit) {
474 		/* Queue deletion of excess entries */
475 		ent->fill_to_high_water = false;
476 		if (ent->pending)
477 			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
478 					   msecs_to_jiffies(1000));
479 		else
480 			queue_work(ent->dev->cache.wq, &ent->work);
481 	}
482 }
483 
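/*
 * Background worker for a cache entry: fills the pool toward the high water
 * mark one mkey at a time, or lazily destroys surplus MRs once the pool has
 * grown past twice the limit.
 */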
484 static void __cache_work_func(struct mlx5_cache_ent *ent)
485 {
486 	struct mlx5_ib_dev *dev = ent->dev;
487 	struct mlx5_mr_cache *cache = &dev->cache;
488 	int err;
489 
490 	spin_lock_irq(&ent->lock);
491 	if (ent->disabled)
492 		goto out;
493 
494 	if (ent->fill_to_high_water &&
495 	    ent->available_mrs + ent->pending < 2 * ent->limit &&
496 	    !READ_ONCE(dev->fill_delay)) {
497 		spin_unlock_irq(&ent->lock);
498 		err = add_keys(ent, 1);
499 		spin_lock_irq(&ent->lock);
500 		if (ent->disabled)
501 			goto out;
502 		if (err) {
503 			/*
504 			 * EAGAIN only happens if pending is positive, so we
505 			 * will be rescheduled from create_mkey_callback(). The only
506 			 * failure path here is ENOMEM.
507 			 */
508 			if (err != -EAGAIN) {
509 				mlx5_ib_warn(
510 					dev,
511 					"command failed order %d, err %d\n",
512 					ent->order, err);
513 				queue_delayed_work(cache->wq, &ent->dwork,
514 						   msecs_to_jiffies(1000));
515 			}
516 		}
517 	} else if (ent->available_mrs > 2 * ent->limit) {
518 		bool need_delay;
519 
520 		/*
521 		 * The remove_cache_mr_locked() logic is performed as a garbage
522 		 * collection task. Such a task is intended to be run when no
523 		 * other active processes are running.
524 		 *
525 		 * The need_resched() will return TRUE if there are user tasks
526 		 * to be activated in the near future.
527 		 *
528 		 * In such a case, we don't execute remove_cache_mr_locked() and
529 		 * postpone the garbage collection work to try to run in the next
530 		 * cycle, in order to free CPU resources to other tasks.
531 		 */
532 		spin_unlock_irq(&ent->lock);
533 		need_delay = need_resched() || someone_adding(cache) ||
534 			     time_after(jiffies,
535 					READ_ONCE(cache->last_add) + 300 * HZ);
536 		spin_lock_irq(&ent->lock);
537 		if (ent->disabled)
538 			goto out;
539 		if (need_delay)
540 			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
541 		remove_cache_mr_locked(ent);
542 		queue_adjust_cache_locked(ent);
543 	}
544 out:
545 	spin_unlock_irq(&ent->lock);
546 }
547 
548 static void delayed_cache_work_func(struct work_struct *work)
549 {
550 	struct mlx5_cache_ent *ent;
551 
552 	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
553 	__cache_work_func(ent);
554 }
555 
556 static void cache_work_func(struct work_struct *work)
557 {
558 	struct mlx5_cache_ent *ent;
559 
560 	ent = container_of(work, struct mlx5_cache_ent, work);
561 	__cache_work_func(ent);
562 }
563 
564 /* Allocate a special entry from the cache */
565 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
566 				       unsigned int entry, int access_flags)
567 {
568 	struct mlx5_mr_cache *cache = &dev->cache;
569 	struct mlx5_cache_ent *ent;
570 	struct mlx5_ib_mr *mr;
571 
572 	if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
573 		    entry >= ARRAY_SIZE(cache->ent)))
574 		return ERR_PTR(-EINVAL);
575 
576 	/* Matches access in alloc_cache_mr() */
577 	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
578 		return ERR_PTR(-EOPNOTSUPP);
579 
580 	ent = &cache->ent[entry];
581 	spin_lock_irq(&ent->lock);
582 	if (list_empty(&ent->head)) {
583 		spin_unlock_irq(&ent->lock);
584 		mr = create_cache_mr(ent);
585 		if (IS_ERR(mr))
586 			return mr;
587 	} else {
588 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
589 		list_del(&mr->list);
590 		ent->available_mrs--;
591 		queue_adjust_cache_locked(ent);
592 		spin_unlock_irq(&ent->lock);
593 	}
594 	mr->access_flags = access_flags;
595 	return mr;
596 }
597 
598 /* Return an MR already available in the cache */
599 static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
600 {
601 	struct mlx5_ib_dev *dev = req_ent->dev;
602 	struct mlx5_ib_mr *mr = NULL;
603 	struct mlx5_cache_ent *ent = req_ent;
604 
605 	/* Try larger MR pools from the cache to satisfy the allocation */
606 	for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
607 		mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
608 			    ent - dev->cache.ent);
609 
610 		spin_lock_irq(&ent->lock);
611 		if (!list_empty(&ent->head)) {
612 			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
613 					      list);
614 			list_del(&mr->list);
615 			ent->available_mrs--;
616 			queue_adjust_cache_locked(ent);
617 			spin_unlock_irq(&ent->lock);
618 			break;
619 		}
620 		queue_adjust_cache_locked(ent);
621 		spin_unlock_irq(&ent->lock);
622 	}
623 
624 	if (!mr)
625 		req_ent->miss++;
626 
627 	return mr;
628 }
629 
630 static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
631 {
632 	struct mlx5_cache_ent *ent = mr->cache_ent;
633 
634 	mr->cache_ent = NULL;
635 	spin_lock_irq(&ent->lock);
636 	ent->total_mrs--;
637 	spin_unlock_irq(&ent->lock);
638 }
639 
640 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
641 {
642 	struct mlx5_cache_ent *ent = mr->cache_ent;
643 
644 	if (!ent)
645 		return;
646 
647 	if (mlx5_mr_cache_invalidate(mr)) {
648 		detach_mr_from_cache(mr);
649 		destroy_mkey(dev, mr);
650 		kfree(mr);
651 		return;
652 	}
653 
654 	spin_lock_irq(&ent->lock);
655 	list_add_tail(&mr->list, &ent->head);
656 	ent->available_mrs++;
657 	queue_adjust_cache_locked(ent);
658 	spin_unlock_irq(&ent->lock);
659 }
660 
661 static void clean_keys(struct mlx5_ib_dev *dev, int c)
662 {
663 	struct mlx5_mr_cache *cache = &dev->cache;
664 	struct mlx5_cache_ent *ent = &cache->ent[c];
665 	struct mlx5_ib_mr *tmp_mr;
666 	struct mlx5_ib_mr *mr;
667 	LIST_HEAD(del_list);
668 
669 	cancel_delayed_work(&ent->dwork);
670 	while (1) {
671 		spin_lock_irq(&ent->lock);
672 		if (list_empty(&ent->head)) {
673 			spin_unlock_irq(&ent->lock);
674 			break;
675 		}
676 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
677 		list_move(&mr->list, &del_list);
678 		ent->available_mrs--;
679 		ent->total_mrs--;
680 		spin_unlock_irq(&ent->lock);
681 		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
682 	}
683 
684 	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
685 		list_del(&mr->list);
686 		kfree(mr);
687 	}
688 }
689 
690 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
691 {
692 	if (!mlx5_debugfs_root || dev->is_rep)
693 		return;
694 
695 	debugfs_remove_recursive(dev->cache.root);
696 	dev->cache.root = NULL;
697 }
698 
699 static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
700 {
701 	struct mlx5_mr_cache *cache = &dev->cache;
702 	struct mlx5_cache_ent *ent;
703 	struct dentry *dir;
704 	int i;
705 
706 	if (!mlx5_debugfs_root || dev->is_rep)
707 		return;
708 
709 	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
710 
711 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
712 		ent = &cache->ent[i];
713 		sprintf(ent->name, "%d", ent->order);
714 		dir = debugfs_create_dir(ent->name, cache->root);
715 		debugfs_create_file("size", 0600, dir, ent, &size_fops);
716 		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
717 		debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
718 		debugfs_create_u32("miss", 0600, dir, &ent->miss);
719 	}
720 }
721 
722 static void delay_time_func(struct timer_list *t)
723 {
724 	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
725 
726 	WRITE_ONCE(dev->fill_delay, 0);
727 }
728 
729 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
730 {
731 	struct mlx5_mr_cache *cache = &dev->cache;
732 	struct mlx5_cache_ent *ent;
733 	int i;
734 
735 	mutex_init(&dev->slow_path_mutex);
736 	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
737 	if (!cache->wq) {
738 		mlx5_ib_warn(dev, "failed to create work queue\n");
739 		return -ENOMEM;
740 	}
741 
742 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
743 	timer_setup(&dev->delay_timer, delay_time_func, 0);
744 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
745 		ent = &cache->ent[i];
746 		INIT_LIST_HEAD(&ent->head);
747 		spin_lock_init(&ent->lock);
748 		ent->order = i + 2;
749 		ent->dev = dev;
750 		ent->limit = 0;
751 
752 		INIT_WORK(&ent->work, cache_work_func);
753 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
754 
755 		if (i > MR_CACHE_LAST_STD_ENTRY) {
756 			mlx5_odp_init_mr_cache_entry(ent);
757 			continue;
758 		}
759 
760 		if (ent->order > mr_cache_max_order(dev))
761 			continue;
762 
763 		ent->page = PAGE_SHIFT;
764 		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
765 			   MLX5_IB_UMR_OCTOWORD;
766 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
767 		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
768 		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
769 		    mlx5_ib_can_load_pas_with_umr(dev, 0))
770 			ent->limit = dev->mdev->profile->mr_cache[i].limit;
771 		else
772 			ent->limit = 0;
773 		spin_lock_irq(&ent->lock);
774 		queue_adjust_cache_locked(ent);
775 		spin_unlock_irq(&ent->lock);
776 	}
777 
778 	mlx5_mr_cache_debugfs_init(dev);
779 
780 	return 0;
781 }
782 
783 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
784 {
785 	unsigned int i;
786 
787 	if (!dev->cache.wq)
788 		return 0;
789 
790 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
791 		struct mlx5_cache_ent *ent = &dev->cache.ent[i];
792 
793 		spin_lock_irq(&ent->lock);
794 		ent->disabled = true;
795 		spin_unlock_irq(&ent->lock);
796 		cancel_work_sync(&ent->work);
797 		cancel_delayed_work_sync(&ent->dwork);
798 	}
799 
800 	mlx5_mr_cache_debugfs_cleanup(dev);
801 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
802 
803 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
804 		clean_keys(dev, i);
805 
806 	destroy_workqueue(dev->cache.wq);
807 	del_timer_sync(&dev->delay_timer);
808 
809 	return 0;
810 }
811 
812 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
813 {
814 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
815 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
816 	struct mlx5_ib_mr *mr;
817 	void *mkc;
818 	u32 *in;
819 	int err;
820 
821 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
822 	if (!mr)
823 		return ERR_PTR(-ENOMEM);
824 
825 	in = kzalloc(inlen, GFP_KERNEL);
826 	if (!in) {
827 		err = -ENOMEM;
828 		goto err_free;
829 	}
830 
831 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
832 
833 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
834 	MLX5_SET(mkc, mkc, length64, 1);
835 	set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
836 
837 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
838 	if (err)
839 		goto err_in;
840 
841 	kfree(in);
842 	mr->mmkey.type = MLX5_MKEY_MR;
843 	mr->ibmr.lkey = mr->mmkey.key;
844 	mr->ibmr.rkey = mr->mmkey.key;
845 	mr->umem = NULL;
846 
847 	return &mr->ibmr;
848 
849 err_in:
850 	kfree(in);
851 
852 err_free:
853 	kfree(mr);
854 
855 	return ERR_PTR(err);
856 }
857 
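/*
 * Number of translation octowords needed to cover @len bytes starting at
 * @addr: each 16-byte octoword holds two 8-byte MTT entries, so e.g. five
 * pages require three octowords.
 */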
858 static int get_octo_len(u64 addr, u64 len, int page_shift)
859 {
860 	u64 page_size = 1ULL << page_shift;
861 	u64 offset;
862 	int npages;
863 
864 	offset = addr & (page_size - 1);
865 	npages = ALIGN(len + offset, page_size) >> page_shift;
866 	return (npages + 1) / 2;
867 }
868 
869 static int mr_cache_max_order(struct mlx5_ib_dev *dev)
870 {
871 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
872 		return MR_CACHE_LAST_STD_ENTRY + 2;
873 	return MLX5_MAX_UMR_SHIFT;
874 }
875 
876 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
877 {
878 	struct mlx5_ib_umr_context *context =
879 		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
880 
881 	context->status = wc->status;
882 	complete(&context->done);
883 }
884 
885 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
886 {
887 	context->cqe.done = mlx5_ib_umr_done;
888 	context->status = -1;
889 	init_completion(&context->done);
890 }
891 
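/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives. The umrc semaphore bounds the number of UMR WRs that
 * may be outstanding at once.
 */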
892 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
893 				  struct mlx5_umr_wr *umrwr)
894 {
895 	struct umr_common *umrc = &dev->umrc;
896 	const struct ib_send_wr *bad;
897 	int err;
898 	struct mlx5_ib_umr_context umr_context;
899 
900 	mlx5_ib_init_umr_context(&umr_context);
901 	umrwr->wr.wr_cqe = &umr_context.cqe;
902 
903 	down(&umrc->sem);
904 	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
905 	if (err) {
906 		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
907 	} else {
908 		wait_for_completion(&umr_context.done);
909 		if (umr_context.status != IB_WC_SUCCESS) {
910 			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
911 				     umr_context.status);
912 			err = -EFAULT;
913 		}
914 	}
915 	up(&umrc->sem);
916 	return err;
917 }
918 
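/*
 * Map a block-count order to the cache entry that serves it. Entry 0 holds
 * the smallest order (2), so for example an order 5 request maps to
 * cache->ent[3]. Requests larger than the last standard entry return NULL.
 */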
919 static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
920 						      unsigned int order)
921 {
922 	struct mlx5_mr_cache *cache = &dev->cache;
923 
924 	if (order < cache->ent[0].order)
925 		return &cache->ent[0];
926 	order = order - cache->ent[0].order;
927 	if (order > MR_CACHE_LAST_STD_ENTRY)
928 		return NULL;
929 	return &cache->ent[order];
930 }
931 
932 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
933 			  u64 length, int access_flags)
934 {
935 	mr->ibmr.lkey = mr->mmkey.key;
936 	mr->ibmr.rkey = mr->mmkey.key;
937 	mr->ibmr.length = length;
938 	mr->ibmr.device = &dev->ib_dev;
939 	mr->access_flags = access_flags;
940 }
941 
942 static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
943 						  u64 iova)
944 {
945 	/*
946 	 * The alignment of iova has already been checked upon entering
947 	 * UVERBS_METHOD_REG_DMABUF_MR
948 	 */
949 	umem->iova = iova;
950 	return PAGE_SIZE;
951 }
952 
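/*
 * Register an MR backed by @umem using an mkey taken from the MR cache when
 * a suitable entry exists and UMR reconfiguration is possible; otherwise
 * fall back to synchronously creating an uncached mkey via reg_create().
 */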
953 static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
954 					     struct ib_umem *umem, u64 iova,
955 					     int access_flags)
956 {
957 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
958 	struct mlx5_cache_ent *ent;
959 	struct mlx5_ib_mr *mr;
960 	unsigned int page_size;
961 
962 	if (umem->is_dmabuf)
963 		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
964 	else
965 		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
966 						     0, iova);
967 	if (WARN_ON(!page_size))
968 		return ERR_PTR(-EINVAL);
969 	ent = mr_cache_ent_from_order(
970 		dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
971 	/*
972 	 * Matches access in alloc_cache_mr(). If the MR can't come from the
973 	 * cache then synchronously create an uncached one.
974 	 */
975 	if (!ent || ent->limit == 0 ||
976 	    !mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags)) {
977 		mutex_lock(&dev->slow_path_mutex);
978 		mr = reg_create(pd, umem, iova, access_flags, page_size, false);
979 		mutex_unlock(&dev->slow_path_mutex);
980 		return mr;
981 	}
982 
983 	mr = get_cache_mr(ent);
984 	if (!mr) {
985 		mr = create_cache_mr(ent);
986 		/*
987 		 * The above already tried to do the same stuff as reg_create(),
988 		 * no reason to try it again.
989 		 */
990 		if (IS_ERR(mr))
991 			return mr;
992 	}
993 
994 	mr->ibmr.pd = pd;
995 	mr->umem = umem;
996 	mr->access_flags = access_flags;
997 	mr->desc_size = sizeof(struct mlx5_mtt);
998 	mr->mmkey.iova = iova;
999 	mr->mmkey.size = umem->length;
1000 	mr->mmkey.pd = to_mpd(pd)->pdn;
1001 	mr->page_shift = order_base_2(page_size);
1002 	set_mr_fields(dev, mr, umem->length, access_flags);
1003 
1004 	return mr;
1005 }
1006 
1007 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
1008 			    MLX5_UMR_MTT_ALIGNMENT)
1009 #define MLX5_SPARE_UMR_CHUNK 0x10000
1010 
1011 /*
1012  * Allocate a temporary buffer to hold the per-page information to transfer to
1013  * HW. For efficiency this should be as large as it can be, but buffer
1014  * allocation failure is not allowed, so try smaller sizes.
1015  */
1016 static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
1017 {
1018 	const size_t xlt_chunk_align =
1019 		MLX5_UMR_MTT_ALIGNMENT / ent_size;
1020 	size_t size;
1021 	void *res = NULL;
1022 
1023 	static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
1024 
1025 	/*
1026 	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that the
1027 	 * allocation can't trigger any kind of reclaim.
1028 	 */
1029 	might_sleep();
1030 
1031 	gfp_mask |= __GFP_ZERO;
1032 
1033 	/*
1034 	 * If the system already has a suitable high order page then just use
1035 	 * that, but don't try hard to create one. This max is about 1M, so a
1036 	 * free x86 huge page will satisfy it.
1037 	 */
1038 	size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
1039 		     MLX5_MAX_UMR_CHUNK);
1040 	*nents = size / ent_size;
1041 	res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
1042 				       get_order(size));
1043 	if (res)
1044 		return res;
1045 
1046 	if (size > MLX5_SPARE_UMR_CHUNK) {
1047 		size = MLX5_SPARE_UMR_CHUNK;
1048 		*nents = size / ent_size;
1049 		res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
1050 					       get_order(size));
1051 		if (res)
1052 			return res;
1053 	}
1054 
1055 	*nents = PAGE_SIZE / ent_size;
1056 	res = (void *)__get_free_page(gfp_mask);
1057 	if (res)
1058 		return res;
1059 
1060 	mutex_lock(&xlt_emergency_page_mutex);
1061 	memset(xlt_emergency_page, 0, PAGE_SIZE);
1062 	return xlt_emergency_page;
1063 }
1064 
1065 static void mlx5_ib_free_xlt(void *xlt, size_t length)
1066 {
1067 	if (xlt == xlt_emergency_page) {
1068 		mutex_unlock(&xlt_emergency_page_mutex);
1069 		return;
1070 	}
1071 
1072 	free_pages((unsigned long)xlt, get_order(length));
1073 }
1074 
1075 /*
1076  * Create an MLX5_IB_SEND_UMR_UPDATE_XLT work request and XLT buffer ready for
1077  * submission.
1078  */
1079 static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
1080 				   struct mlx5_umr_wr *wr, struct ib_sge *sg,
1081 				   size_t nents, size_t ent_size,
1082 				   unsigned int flags)
1083 {
1084 	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
1085 	struct device *ddev = &dev->mdev->pdev->dev;
1086 	dma_addr_t dma;
1087 	void *xlt;
1088 
1089 	xlt = mlx5_ib_alloc_xlt(&nents, ent_size,
1090 				flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
1091 								 GFP_KERNEL);
1092 	sg->length = nents * ent_size;
1093 	dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
1094 	if (dma_mapping_error(ddev, dma)) {
1095 		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
1096 		mlx5_ib_free_xlt(xlt, sg->length);
1097 		return NULL;
1098 	}
1099 	sg->addr = dma;
1100 	sg->lkey = dev->umrc.pd->local_dma_lkey;
1101 
1102 	memset(wr, 0, sizeof(*wr));
1103 	wr->wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
1104 	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
1105 		wr->wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1106 	wr->wr.sg_list = sg;
1107 	wr->wr.num_sge = 1;
1108 	wr->wr.opcode = MLX5_IB_WR_UMR;
1109 	wr->pd = mr->ibmr.pd;
1110 	wr->mkey = mr->mmkey.key;
1111 	wr->length = mr->mmkey.size;
1112 	wr->virt_addr = mr->mmkey.iova;
1113 	wr->access_flags = mr->access_flags;
1114 	wr->page_shift = mr->page_shift;
1115 	wr->xlt_size = sg->length;
1116 	return xlt;
1117 }
1118 
1119 static void mlx5_ib_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
1120 				   struct ib_sge *sg)
1121 {
1122 	struct device *ddev = &dev->mdev->pdev->dev;
1123 
1124 	dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
1125 	mlx5_ib_free_xlt(xlt, sg->length);
1126 }
1127 
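/*
 * Translate MLX5_IB_UPD_XLT_* update flags into the UMR send flags that must
 * be set on the final work request of an XLT update.
 */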
1128 static unsigned int xlt_wr_final_send_flags(unsigned int flags)
1129 {
1130 	unsigned int res = 0;
1131 
1132 	if (flags & MLX5_IB_UPD_XLT_ENABLE)
1133 		res |= MLX5_IB_SEND_UMR_ENABLE_MR |
1134 		       MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
1135 		       MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1136 	if (flags & MLX5_IB_UPD_XLT_PD || flags & MLX5_IB_UPD_XLT_ACCESS)
1137 		res |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1138 	if (flags & MLX5_IB_UPD_XLT_ADDR)
1139 		res |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1140 	return res;
1141 }
1142 
1143 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
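/*
 * Update a window of XLT entries, starting at @idx, for an ODP MR. The
 * entries are staged in a DMA-mapped scratch buffer and pushed to the HW in
 * chunks, one UMR work request per chunk.
 */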
1144 		       int page_shift, int flags)
1145 {
1146 	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
1147 	struct device *ddev = &dev->mdev->pdev->dev;
1148 	void *xlt;
1149 	struct mlx5_umr_wr wr;
1150 	struct ib_sge sg;
1151 	int err = 0;
1152 	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
1153 			       ? sizeof(struct mlx5_klm)
1154 			       : sizeof(struct mlx5_mtt);
1155 	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
1156 	const int page_mask = page_align - 1;
1157 	size_t pages_mapped = 0;
1158 	size_t pages_to_map = 0;
1159 	size_t pages_iter;
1160 	size_t size_to_map = 0;
1161 	size_t orig_sg_length;
1162 
1163 	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
1164 	    !umr_can_use_indirect_mkey(dev))
1165 		return -EPERM;
1166 
1167 	if (WARN_ON(!mr->umem->is_odp))
1168 		return -EINVAL;
1169 
1170 	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
1171 	 * so we need to align the offset and length accordingly
1172 	 */
1173 	if (idx & page_mask) {
1174 		npages += idx & page_mask;
1175 		idx &= ~page_mask;
1176 	}
1177 	pages_to_map = ALIGN(npages, page_align);
1178 
1179 	xlt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, npages, desc_size, flags);
1180 	if (!xlt)
1181 		return -ENOMEM;
1182 	pages_iter = sg.length / desc_size;
1183 	orig_sg_length = sg.length;
1184 
1185 	if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
1186 		struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
1187 		size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
1188 
1189 		pages_to_map = min_t(size_t, pages_to_map, max_pages);
1190 	}
1191 
1192 	wr.page_shift = page_shift;
1193 
1194 	for (pages_mapped = 0;
1195 	     pages_mapped < pages_to_map && !err;
1196 	     pages_mapped += pages_iter, idx += pages_iter) {
1197 		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
1198 		size_to_map = npages * desc_size;
1199 		dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
1200 					DMA_TO_DEVICE);
1201 		mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
1202 		dma_sync_single_for_device(ddev, sg.addr, sg.length,
1203 					   DMA_TO_DEVICE);
1204 
1205 		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
1206 
1207 		if (pages_mapped + pages_iter >= pages_to_map)
1208 			wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
1209 
1210 		wr.offset = idx * desc_size;
1211 		wr.xlt_size = sg.length;
1212 
1213 		err = mlx5_ib_post_send_wait(dev, &wr);
1214 	}
1215 	sg.length = orig_sg_length;
1216 	mlx5_ib_unmap_free_xlt(dev, xlt, &sg);
1217 	return err;
1218 }
1219 
1220 /*
1221  * Send the DMA list to the HW for a normal MR using UMR.
1222  * A dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
1223  * flag may be used.
1224  */
1225 int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
1226 {
1227 	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
1228 	struct device *ddev = &dev->mdev->pdev->dev;
1229 	struct ib_block_iter biter;
1230 	struct mlx5_mtt *cur_mtt;
1231 	struct mlx5_umr_wr wr;
1232 	size_t orig_sg_length;
1233 	struct mlx5_mtt *mtt;
1234 	size_t final_size;
1235 	struct ib_sge sg;
1236 	int err = 0;
1237 
1238 	if (WARN_ON(mr->umem->is_odp))
1239 		return -EINVAL;
1240 
1241 	mtt = mlx5_ib_create_xlt_wr(mr, &wr, &sg,
1242 				    ib_umem_num_dma_blocks(mr->umem,
1243 							   1 << mr->page_shift),
1244 				    sizeof(*mtt), flags);
1245 	if (!mtt)
1246 		return -ENOMEM;
1247 	orig_sg_length = sg.length;
1248 
1249 	cur_mtt = mtt;
1250 	rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
1251 			     BIT(mr->page_shift)) {
1252 		if (cur_mtt == (void *)mtt + sg.length) {
1253 			dma_sync_single_for_device(ddev, sg.addr, sg.length,
1254 						   DMA_TO_DEVICE);
1255 			err = mlx5_ib_post_send_wait(dev, &wr);
1256 			if (err)
1257 				goto err;
1258 			dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
1259 						DMA_TO_DEVICE);
1260 			wr.offset += sg.length;
1261 			cur_mtt = mtt;
1262 		}
1263 
1264 		cur_mtt->ptag =
1265 			cpu_to_be64(rdma_block_iter_dma_address(&biter) |
1266 				    MLX5_IB_MTT_PRESENT);
1267 
1268 		if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
1269 			cur_mtt->ptag = 0;
1270 
1271 		cur_mtt++;
1272 	}
1273 
1274 	final_size = (void *)cur_mtt - (void *)mtt;
1275 	sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
1276 	memset(cur_mtt, 0, sg.length - final_size);
1277 	wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
1278 	wr.xlt_size = sg.length;
1279 
1280 	dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
1281 	err = mlx5_ib_post_send_wait(dev, &wr);
1282 
1283 err:
1284 	sg.length = orig_sg_length;
1285 	mlx5_ib_unmap_free_xlt(dev, mtt, &sg);
1286 	return err;
1287 }
1288 
1289 /*
1290  * Create a user MR. When @populate is set the PAS list is put in the mkey
1291  * creation command; otherwise the PAS is loaded later through UMR.
1292  */
1293 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
1294 				     u64 iova, int access_flags,
1295 				     unsigned int page_size, bool populate)
1296 {
1297 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1298 	struct mlx5_ib_mr *mr;
1299 	__be64 *pas;
1300 	void *mkc;
1301 	int inlen;
1302 	u32 *in;
1303 	int err;
1304 	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
1305 
1306 	if (!page_size)
1307 		return ERR_PTR(-EINVAL);
1308 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1309 	if (!mr)
1310 		return ERR_PTR(-ENOMEM);
1311 
1312 	mr->ibmr.pd = pd;
1313 	mr->access_flags = access_flags;
1314 	mr->page_shift = order_base_2(page_size);
1315 
1316 	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1317 	if (populate)
1318 		inlen += sizeof(*pas) *
1319 			 roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
1320 	in = kvzalloc(inlen, GFP_KERNEL);
1321 	if (!in) {
1322 		err = -ENOMEM;
1323 		goto err_1;
1324 	}
1325 	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
1326 	if (populate) {
1327 		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
1328 			err = -EINVAL;
1329 			goto err_2;
1330 		}
1331 		mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
1332 				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
1333 	}
1334 
1335 	/* The pg_access bit allows setting the access flags
1336 	 * in the page list submitted with the command. */
1337 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
1338 
1339 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1340 	set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
1341 				      populate ? pd : dev->umrc.pd);
1342 	MLX5_SET(mkc, mkc, free, !populate);
1343 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
1344 	MLX5_SET(mkc, mkc, umr_en, 1);
1345 
1346 	MLX5_SET64(mkc, mkc, len, umem->length);
1347 	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
1348 	MLX5_SET(mkc, mkc, translations_octword_size,
1349 		 get_octo_len(iova, umem->length, mr->page_shift));
1350 	MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
1351 	if (populate) {
1352 		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
1353 			 get_octo_len(iova, umem->length, mr->page_shift));
1354 	}
1355 
1356 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1357 	if (err) {
1358 		mlx5_ib_warn(dev, "create mkey failed\n");
1359 		goto err_2;
1360 	}
1361 	mr->mmkey.type = MLX5_MKEY_MR;
1362 	mr->desc_size = sizeof(struct mlx5_mtt);
1363 	mr->umem = umem;
1364 	set_mr_fields(dev, mr, umem->length, access_flags);
1365 	kvfree(in);
1366 
1367 	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
1368 
1369 	return mr;
1370 
1371 err_2:
1372 	kvfree(in);
1373 err_1:
1374 	kfree(mr);
1375 	return ERR_PTR(err);
1376 }
1377 
1378 static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
1379 				       u64 length, int acc, int mode)
1380 {
1381 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1382 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1383 	struct mlx5_ib_mr *mr;
1384 	void *mkc;
1385 	u32 *in;
1386 	int err;
1387 
1388 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1389 	if (!mr)
1390 		return ERR_PTR(-ENOMEM);
1391 
1392 	in = kzalloc(inlen, GFP_KERNEL);
1393 	if (!in) {
1394 		err = -ENOMEM;
1395 		goto err_free;
1396 	}
1397 
1398 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1399 
1400 	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
1401 	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
1402 	MLX5_SET64(mkc, mkc, len, length);
1403 	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
1404 
1405 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1406 	if (err)
1407 		goto err_in;
1408 
1409 	kfree(in);
1410 
1411 	set_mr_fields(dev, mr, length, acc);
1412 
1413 	return &mr->ibmr;
1414 
1415 err_in:
1416 	kfree(in);
1417 
1418 err_free:
1419 	kfree(mr);
1420 
1421 	return ERR_PTR(err);
1422 }
1423 
1424 int mlx5_ib_advise_mr(struct ib_pd *pd,
1425 		      enum ib_uverbs_advise_mr_advice advice,
1426 		      u32 flags,
1427 		      struct ib_sge *sg_list,
1428 		      u32 num_sge,
1429 		      struct uverbs_attr_bundle *attrs)
1430 {
1431 	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
1432 	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
1433 	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
1434 		return -EOPNOTSUPP;
1435 
1436 	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
1437 					 sg_list, num_sge);
1438 }
1439 
1440 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
1441 				struct ib_dm_mr_attr *attr,
1442 				struct uverbs_attr_bundle *attrs)
1443 {
1444 	struct mlx5_ib_dm *mdm = to_mdm(dm);
1445 	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
1446 	u64 start_addr = mdm->dev_addr + attr->offset;
1447 	int mode;
1448 
1449 	switch (mdm->type) {
1450 	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
1451 		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
1452 			return ERR_PTR(-EINVAL);
1453 
1454 		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
1455 		start_addr -= pci_resource_start(dev->pdev, 0);
1456 		break;
1457 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
1458 	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
1459 		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
1460 			return ERR_PTR(-EINVAL);
1461 
1462 		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
1463 		break;
1464 	default:
1465 		return ERR_PTR(-EINVAL);
1466 	}
1467 
1468 	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
1469 				 attr->access_flags, mode);
1470 }
1471 
1472 static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
1473 				    u64 iova, int access_flags)
1474 {
1475 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1476 	struct mlx5_ib_mr *mr = NULL;
1477 	bool xlt_with_umr;
1478 	int err;
1479 
1480 	xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, umem->length);
1481 	if (xlt_with_umr) {
1482 		mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
1483 	} else {
1484 		unsigned int page_size = mlx5_umem_find_best_pgsz(
1485 			umem, mkc, log_page_size, 0, iova);
1486 
1487 		mutex_lock(&dev->slow_path_mutex);
1488 		mr = reg_create(pd, umem, iova, access_flags, page_size, true);
1489 		mutex_unlock(&dev->slow_path_mutex);
1490 	}
1491 	if (IS_ERR(mr)) {
1492 		ib_umem_release(umem);
1493 		return ERR_CAST(mr);
1494 	}
1495 
1496 	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1497 
1498 	atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
1499 
1500 	if (xlt_with_umr) {
1501 		/*
1502 		 * If the MR was created with reg_create then it will be
1503 		 * configured properly but left disabled. It is safe to go ahead
1504 		 * and configure it again via UMR while enabling it.
1505 		 */
1506 		err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
1507 		if (err) {
1508 			dereg_mr(dev, mr);
1509 			return ERR_PTR(err);
1510 		}
1511 	}
1512 	return &mr->ibmr;
1513 }
1514 
1515 static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
1516 					u64 iova, int access_flags,
1517 					struct ib_udata *udata)
1518 {
1519 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1520 	struct ib_umem_odp *odp;
1521 	struct mlx5_ib_mr *mr;
1522 	int err;
1523 
1524 	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1525 		return ERR_PTR(-EOPNOTSUPP);
1526 
1527 	if (!start && length == U64_MAX) {
1528 		if (iova != 0)
1529 			return ERR_PTR(-EINVAL);
1530 		if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1531 			return ERR_PTR(-EINVAL);
1532 
1533 		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
1534 		if (IS_ERR(mr))
1535 			return ERR_CAST(mr);
1536 		return &mr->ibmr;
1537 	}
1538 
1539 	/* ODP requires xlt update via umr to work. */
1540 	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
1541 		return ERR_PTR(-EINVAL);
1542 
1543 	odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
1544 			      &mlx5_mn_ops);
1545 	if (IS_ERR(odp))
1546 		return ERR_CAST(odp);
1547 
1548 	mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
1549 	if (IS_ERR(mr)) {
1550 		ib_umem_release(&odp->umem);
1551 		return ERR_CAST(mr);
1552 	}
1553 
1554 	odp->private = mr;
1555 	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
1556 	if (err)
1557 		goto err_dereg_mr;
1558 
1559 	err = mlx5_ib_init_odp_mr(mr);
1560 	if (err)
1561 		goto err_dereg_mr;
1562 	return &mr->ibmr;
1563 
1564 err_dereg_mr:
1565 	dereg_mr(dev, mr);
1566 	return ERR_PTR(err);
1567 }
1568 
1569 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1570 				  u64 iova, int access_flags,
1571 				  struct ib_udata *udata)
1572 {
1573 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1574 	struct ib_umem *umem;
1575 
1576 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
1577 		return ERR_PTR(-EOPNOTSUPP);
1578 
1579 	mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
1580 		    start, iova, length, access_flags);
1581 
1582 	if (access_flags & IB_ACCESS_ON_DEMAND)
1583 		return create_user_odp_mr(pd, start, length, iova, access_flags,
1584 					  udata);
1585 	umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
1586 	if (IS_ERR(umem))
1587 		return ERR_CAST(umem);
1588 	return create_real_mr(pd, umem, iova, access_flags);
1589 }
1590 
1591 static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
1592 {
1593 	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
1594 	struct mlx5_ib_mr *mr = umem_dmabuf->private;
1595 
1596 	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
1597 
1598 	if (!umem_dmabuf->sgt)
1599 		return;
1600 
1601 	mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
1602 	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
1603 }
1604 
1605 static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
1606 	.allow_peer2peer = 1,
1607 	.move_notify = mlx5_ib_dmabuf_invalidate_cb,
1608 };
1609 
1610 struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
1611 					 u64 length, u64 virt_addr,
1612 					 int fd, int access_flags,
1613 					 struct ib_udata *udata)
1614 {
1615 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1616 	struct mlx5_ib_mr *mr = NULL;
1617 	struct ib_umem_dmabuf *umem_dmabuf;
1618 	int err;
1619 
1620 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
1621 	    !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1622 		return ERR_PTR(-EOPNOTSUPP);
1623 
1624 	mlx5_ib_dbg(dev,
1625 		    "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
1626 		    offset, virt_addr, length, fd, access_flags);
1627 
1628 	/* dmabuf requires xlt update via umr to work. */
1629 	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
1630 		return ERR_PTR(-EINVAL);
1631 
1632 	umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
1633 					 access_flags,
1634 					 &mlx5_ib_dmabuf_attach_ops);
1635 	if (IS_ERR(umem_dmabuf)) {
1636 		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
1637 			    PTR_ERR(umem_dmabuf));
1638 		return ERR_CAST(umem_dmabuf);
1639 	}
1640 
1641 	mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
1642 				access_flags);
1643 	if (IS_ERR(mr)) {
1644 		ib_umem_release(&umem_dmabuf->umem);
1645 		return ERR_CAST(mr);
1646 	}
1647 
1648 	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1649 
1650 	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
1651 	umem_dmabuf->private = mr;
1652 	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
1653 	if (err)
1654 		goto err_dereg_mr;
1655 
1656 	err = mlx5_ib_init_dmabuf_mr(mr);
1657 	if (err)
1658 		goto err_dereg_mr;
1659 	return &mr->ibmr;
1660 
1661 err_dereg_mr:
1662 	dereg_mr(dev, mr);
1663 	return ERR_PTR(err);
1664 }
1665 
1666 /**
1667  * mlx5_mr_cache_invalidate - Fence all DMA on the MR
1668  * @mr: The MR to fence
1669  *
1670  * Upon return the NIC will not be doing any DMA to the pages under the MR,
1671  * and any DMA in progress will be completed. Failure of this function
1672  * indicates the HW has failed catastrophically.
1673  */
1674 int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
1675 {
1676 	struct mlx5_umr_wr umrwr = {};
1677 
1678 	if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1679 		return 0;
1680 
1681 	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1682 			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1683 	umrwr.wr.opcode = MLX5_IB_WR_UMR;
1684 	umrwr.pd = mr_to_mdev(mr)->umrc.pd;
1685 	umrwr.mkey = mr->mmkey.key;
1686 	umrwr.ignore_free_state = 1;
1687 
1688 	return mlx5_ib_post_send_wait(mr_to_mdev(mr), &umrwr);
1689 }
1690 
1691 /*
1692  * True if the change in access flags can be done via UMR; only some access
1693  * flags can be updated.
1694  */
1695 static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
1696 				     unsigned int current_access_flags,
1697 				     unsigned int target_access_flags)
1698 {
1699 	unsigned int diffs = current_access_flags ^ target_access_flags;
1700 
1701 	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
1702 		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
1703 		return false;
1704 	return mlx5_ib_can_reconfig_with_umr(dev, current_access_flags,
1705 					     target_access_flags);
1706 }
1707 
1708 static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
1709 			       int access_flags)
1710 {
1711 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1712 	struct mlx5_umr_wr umrwr = {
1713 		.wr = {
1714 			.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
1715 				      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS,
1716 			.opcode = MLX5_IB_WR_UMR,
1717 		},
1718 		.mkey = mr->mmkey.key,
1719 		.pd = pd,
1720 		.access_flags = access_flags,
1721 	};
1722 	int err;
1723 
1724 	err = mlx5_ib_post_send_wait(dev, &umrwr);
1725 	if (err)
1726 		return err;
1727 
1728 	mr->access_flags = access_flags;
1729 	mr->mmkey.pd = to_mpd(pd)->pdn;
1730 	return 0;
1731 }
1732 
1733 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
1734 				  struct ib_umem *new_umem,
1735 				  int new_access_flags, u64 iova,
1736 				  unsigned long *page_size)
1737 {
1738 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1739 
1740 	/* We only track the allocated sizes of MRs from the cache */
1741 	if (!mr->cache_ent)
1742 		return false;
1743 	if (!mlx5_ib_can_load_pas_with_umr(dev, new_umem->length))
1744 		return false;
1745 
1746 	*page_size =
1747 		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
1748 	if (WARN_ON(!*page_size))
1749 		return false;
1750 	return (1ULL << mr->cache_ent->order) >=
1751 	       ib_umem_num_dma_blocks(new_umem, *page_size);
1752 }
1753 
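/*
 * Rewrite the PAS list (and optionally the PD and access flags) of an
 * existing cache MR for a new umem. The mkey is revoked first so the update
 * appears atomic to any user of the old mapping.
 */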
1754 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
1755 			 int access_flags, int flags, struct ib_umem *new_umem,
1756 			 u64 iova, unsigned long page_size)
1757 {
1758 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1759 	int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
1760 	struct ib_umem *old_umem = mr->umem;
1761 	int err;
1762 
1763 	/*
1764 	 * To keep everything simple the MR is revoked before we start to mess
1765 	 * with it. This ensures the change is atomic relative to any use of the
1766 	 * MR.
1767 	 */
1768 	err = mlx5_mr_cache_invalidate(mr);
1769 	if (err)
1770 		return err;
1771 
1772 	if (flags & IB_MR_REREG_PD) {
1773 		mr->ibmr.pd = pd;
1774 		mr->mmkey.pd = to_mpd(pd)->pdn;
1775 		upd_flags |= MLX5_IB_UPD_XLT_PD;
1776 	}
1777 	if (flags & IB_MR_REREG_ACCESS) {
1778 		mr->access_flags = access_flags;
1779 		upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1780 	}
1781 
1782 	mr->ibmr.length = new_umem->length;
1783 	mr->mmkey.iova = iova;
1784 	mr->mmkey.size = new_umem->length;
1785 	mr->page_shift = order_base_2(page_size);
1786 	mr->umem = new_umem;
1787 	err = mlx5_ib_update_mr_pas(mr, upd_flags);
1788 	if (err) {
1789 		/*
1790 		 * The MR is revoked at this point so there is no issue with
1791 		 * freeing new_umem.
1792 		 */
1793 		mr->umem = old_umem;
1794 		return err;
1795 	}
1796 
1797 	atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
1798 	ib_umem_release(old_umem);
1799 	atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
1800 	return 0;
1801 }
1802 
1803 struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1804 				    u64 length, u64 iova, int new_access_flags,
1805 				    struct ib_pd *new_pd,
1806 				    struct ib_udata *udata)
1807 {
1808 	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1809 	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1810 	int err;
1811 
1812 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
1813 		return ERR_PTR(-EOPNOTSUPP);
1814 
1815 	mlx5_ib_dbg(
1816 		dev,
1817 		"start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
1818 		start, iova, length, new_access_flags);
1819 
1820 	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
1821 		return ERR_PTR(-EOPNOTSUPP);
1822 
1823 	if (!(flags & IB_MR_REREG_ACCESS))
1824 		new_access_flags = mr->access_flags;
1825 	if (!(flags & IB_MR_REREG_PD))
1826 		new_pd = ib_mr->pd;
1827 
1828 	if (!(flags & IB_MR_REREG_TRANS)) {
1829 		struct ib_umem *umem;
1830 
1831 		/* Fast path for PD/access change */
1832 		if (can_use_umr_rereg_access(dev, mr->access_flags,
1833 					     new_access_flags)) {
1834 			err = umr_rereg_pd_access(mr, new_pd, new_access_flags);
1835 			if (err)
1836 				return ERR_PTR(err);
1837 			return NULL;
1838 		}
1839 		/* DM or ODP MRs don't have a normal umem so we can't re-use it */
1840 		if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1841 			goto recreate;
1842 
1843 		/*
1844 		 * Only one active MR can refer to a umem at one time, revoke
1845 		 * Only one active MR can refer to a umem at one time; revoke
1846 		 */
1847 		err = mlx5_mr_cache_invalidate(mr);
1848 		if (err)
1849 			return ERR_PTR(err);
1850 		umem = mr->umem;
1851 		mr->umem = NULL;
1852 		atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
1853 
1854 		return create_real_mr(new_pd, umem, mr->mmkey.iova,
1855 				      new_access_flags);
1856 	}
1857 
1858 	/*
1859 	 * DM doesn't have a PAS list so we can't re-use it; odp/dmabuf do,
1860 	 * but the logic around releasing the umem is different.
1861 	 */
1862 	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1863 		goto recreate;
1864 
1865 	if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
1866 	    can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
1867 		struct ib_umem *new_umem;
1868 		unsigned long page_size;
1869 
1870 		new_umem = ib_umem_get(&dev->ib_dev, start, length,
1871 				       new_access_flags);
1872 		if (IS_ERR(new_umem))
1873 			return ERR_CAST(new_umem);
1874 
1875 		/* Fast path for PAS change */
1876 		if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
1877 					  &page_size)) {
1878 			err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
1879 					    new_umem, iova, page_size);
1880 			if (err) {
1881 				ib_umem_release(new_umem);
1882 				return ERR_PTR(err);
1883 			}
1884 			return NULL;
1885 		}
1886 		return create_real_mr(new_pd, new_umem, iova, new_access_flags);
1887 	}
1888 
1889 	/*
1890 	 * Everything else has no state that we can preserve, so just create a
1891 	 * new MR from scratch.
1892 	 */
1893 recreate:
1894 	return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
1895 				   new_access_flags, udata);
1896 }
1897 
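/*
 * Allocate the kernel-side descriptor buffer for a non-umem MR. The buffer
 * is over-allocated so the descriptors can be aligned to MLX5_UMR_ALIGN,
 * and is then DMA mapped towards the device.
 */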
1898 static int
1899 mlx5_alloc_priv_descs(struct ib_device *device,
1900 		      struct mlx5_ib_mr *mr,
1901 		      int ndescs,
1902 		      int desc_size)
1903 {
1904 	struct mlx5_ib_dev *dev = to_mdev(device);
1905 	struct device *ddev = &dev->mdev->pdev->dev;
1906 	int size = ndescs * desc_size;
1907 	int add_size;
1908 	int ret;
1909 
1910 	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1911 
1912 	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1913 	if (!mr->descs_alloc)
1914 		return -ENOMEM;
1915 
1916 	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1917 
1918 	mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
1919 	if (dma_mapping_error(ddev, mr->desc_map)) {
1920 		ret = -ENOMEM;
1921 		goto err;
1922 	}
1923 
1924 	return 0;
1925 err:
1926 	kfree(mr->descs_alloc);
1927 
1928 	return ret;
1929 }
1930 
1931 static void
1932 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1933 {
1934 	if (mr->descs) {
1935 		struct ib_device *device = mr->ibmr.device;
1936 		int size = mr->max_descs * mr->desc_size;
1937 		struct mlx5_ib_dev *dev = to_mdev(device);
1938 
1939 		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
1940 				 DMA_TO_DEVICE);
1941 		kfree(mr->descs_alloc);
1942 		mr->descs = NULL;
1943 	}
1944 }
1945 
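/*
 * Tear down the signature state (PSVs and the sig_mrs xarray entry) and,
 * for MRs that did not come from the MR cache, destroy the mkey and free
 * the private descriptor buffer.
 */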
1946 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1947 {
1948 	if (mr->sig) {
1949 		if (mlx5_core_destroy_psv(dev->mdev,
1950 					  mr->sig->psv_memory.psv_idx))
1951 			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1952 				     mr->sig->psv_memory.psv_idx);
1953 		if (mlx5_core_destroy_psv(dev->mdev,
1954 					  mr->sig->psv_wire.psv_idx))
1955 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1956 				     mr->sig->psv_wire.psv_idx);
1957 		xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
1958 		kfree(mr->sig);
1959 		mr->sig = NULL;
1960 	}
1961 
1962 	if (!mr->cache_ent) {
1963 		destroy_mkey(dev, mr);
1964 		mlx5_free_priv_descs(mr);
1965 	}
1966 }
1967 
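/*
 * Stop all DMA against the MR (ODP/dmabuf fence or mkey destruction),
 * release the umem (and, for non-ODP MRs, its reg_pages accounting), then
 * either return the MR to the cache or free it.
 */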
1968 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1969 {
1970 	struct ib_umem *umem = mr->umem;
1971 
1972 	/* Stop all DMA */
1973 	if (is_odp_mr(mr))
1974 		mlx5_ib_fence_odp_mr(mr);
1975 	else if (is_dmabuf_mr(mr))
1976 		mlx5_ib_fence_dmabuf_mr(mr);
1977 	else
1978 		clean_mr(dev, mr);
1979 
1980 	if (umem) {
1981 		if (!is_odp_mr(mr))
1982 			atomic_sub(ib_umem_num_pages(umem),
1983 				   &dev->mdev->priv.reg_pages);
1984 		ib_umem_release(umem);
1985 	}
1986 
1987 	if (mr->cache_ent)
1988 		mlx5_mr_cache_free(dev, mr);
1989 	else
1990 		kfree(mr);
1991 }
1992 
1993 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1994 {
1995 	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1996 
1997 	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1998 		dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
1999 		dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
2000 	}
2001 
2002 	if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
2003 		mlx5_ib_free_implicit_mr(mmr);
2004 		return 0;
2005 	}
2006 
2007 	dereg_mr(to_mdev(ibmr->device), mmr);
2008 
2009 	return 0;
2010 }
2011 
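/*
 * Fill the mkey context for a kernel MR that will later be populated
 * through UMR: the mkey is created in the free state with umr_en set, and
 * the translation size, access mode and page size are configured here.
 */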
2012 static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
2013 				   int access_mode, int page_shift)
2014 {
2015 	void *mkc;
2016 
2017 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2018 
2019 	/* This is only used from the kernel, so setting the PD is OK. */
2020 	set_mkc_access_pd_addr_fields(mkc, 0, 0, pd);
2021 	MLX5_SET(mkc, mkc, free, 1);
2022 	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
2023 	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
2024 	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
2025 	MLX5_SET(mkc, mkc, umr_en, 1);
2026 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
2027 }
2028 
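/*
 * Common helper for the ib_alloc_mr() flavours: allocate the descriptor
 * buffer, create the UMR-enabled mkey and publish the resulting lkey/rkey.
 */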
2029 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2030 				  int ndescs, int desc_size, int page_shift,
2031 				  int access_mode, u32 *in, int inlen)
2032 {
2033 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
2034 	int err;
2035 
2036 	mr->access_mode = access_mode;
2037 	mr->desc_size = desc_size;
2038 	mr->max_descs = ndescs;
2039 
2040 	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
2041 	if (err)
2042 		return err;
2043 
2044 	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
2045 
2046 	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
2047 	if (err)
2048 		goto err_free_descs;
2049 
2050 	mr->mmkey.type = MLX5_MKEY_MR;
2051 	mr->ibmr.lkey = mr->mmkey.key;
2052 	mr->ibmr.rkey = mr->mmkey.key;
2053 
2054 	return 0;
2055 
2056 err_free_descs:
2057 	mlx5_free_priv_descs(mr);
2058 	return err;
2059 }
2060 
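/*
 * Allocate one of the internal pi MRs (MTT or KLM based) that back an
 * IB_MR_TYPE_INTEGRITY MR. These carry the data + metadata mappings and
 * have no umem of their own.
 */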
2061 static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
2062 				u32 max_num_sg, u32 max_num_meta_sg,
2063 				int desc_size, int access_mode)
2064 {
2065 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2066 	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
2067 	int page_shift = 0;
2068 	struct mlx5_ib_mr *mr;
2069 	u32 *in;
2070 	int err;
2071 
2072 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2073 	if (!mr)
2074 		return ERR_PTR(-ENOMEM);
2075 
2076 	mr->ibmr.pd = pd;
2077 	mr->ibmr.device = pd->device;
2078 
2079 	in = kzalloc(inlen, GFP_KERNEL);
2080 	if (!in) {
2081 		err = -ENOMEM;
2082 		goto err_free;
2083 	}
2084 
2085 	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
2086 		page_shift = PAGE_SHIFT;
2087 
2088 	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
2089 				     access_mode, in, inlen);
2090 	if (err)
2091 		goto err_free_in;
2092 
2093 	mr->umem = NULL;
2094 	kfree(in);
2095 
2096 	return mr;
2097 
2098 err_free_in:
2099 	kfree(in);
2100 err_free:
2101 	kfree(mr);
2102 	return ERR_PTR(err);
2103 }
2104 
2105 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2106 				    int ndescs, u32 *in, int inlen)
2107 {
2108 	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
2109 				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
2110 				      inlen);
2111 }
2112 
2113 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2114 				    int ndescs, u32 *in, int inlen)
2115 {
2116 	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
2117 				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
2118 }
2119 
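/*
 * Build the state needed for an integrity MR: the memory and wire PSVs,
 * the internal KLM and MTT pi MRs, and the BSF-enabled KLM mkey that the
 * signature handover operates on.
 */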
2120 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2121 				      int max_num_sg, int max_num_meta_sg,
2122 				      u32 *in, int inlen)
2123 {
2124 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
2125 	u32 psv_index[2];
2126 	void *mkc;
2127 	int err;
2128 
2129 	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
2130 	if (!mr->sig)
2131 		return -ENOMEM;
2132 
2133 	/* create mem & wire PSVs */
2134 	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
2135 	if (err)
2136 		goto err_free_sig;
2137 
2138 	mr->sig->psv_memory.psv_idx = psv_index[0];
2139 	mr->sig->psv_wire.psv_idx = psv_index[1];
2140 
2141 	mr->sig->sig_status_checked = true;
2142 	mr->sig->sig_err_exists = false;
2143 	/* Next UMR, Arm SIGERR */
2144 	++mr->sig->sigerr_count;
2145 	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
2146 					 sizeof(struct mlx5_klm),
2147 					 MLX5_MKC_ACCESS_MODE_KLMS);
2148 	if (IS_ERR(mr->klm_mr)) {
2149 		err = PTR_ERR(mr->klm_mr);
2150 		goto err_destroy_psv;
2151 	}
2152 	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
2153 					 sizeof(struct mlx5_mtt),
2154 					 MLX5_MKC_ACCESS_MODE_MTT);
2155 	if (IS_ERR(mr->mtt_mr)) {
2156 		err = PTR_ERR(mr->mtt_mr);
2157 		goto err_free_klm_mr;
2158 	}
2159 
2160 	/* Set bsf descriptors for mkey */
2161 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2162 	MLX5_SET(mkc, mkc, bsf_en, 1);
2163 	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
2164 
2165 	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
2166 				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
2167 	if (err)
2168 		goto err_free_mtt_mr;
2169 
2170 	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
2171 			      mr->sig, GFP_KERNEL));
2172 	if (err)
2173 		goto err_free_descs;
2174 	return 0;
2175 
2176 err_free_descs:
2177 	destroy_mkey(dev, mr);
2178 	mlx5_free_priv_descs(mr);
2179 err_free_mtt_mr:
2180 	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
2181 	mr->mtt_mr = NULL;
2182 err_free_klm_mr:
2183 	dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
2184 	mr->klm_mr = NULL;
2185 err_destroy_psv:
2186 	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
2187 		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
2188 			     mr->sig->psv_memory.psv_idx);
2189 	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
2190 		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
2191 			     mr->sig->psv_wire.psv_idx);
2192 err_free_sig:
2193 	kfree(mr->sig);
2194 
2195 	return err;
2196 }
2197 
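/*
 * Common entry point behind mlx5_ib_alloc_mr() and
 * mlx5_ib_alloc_mr_integrity(); dispatches on the requested mr_type.
 */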
2198 static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
2199 					enum ib_mr_type mr_type, u32 max_num_sg,
2200 					u32 max_num_meta_sg)
2201 {
2202 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
2203 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2204 	int ndescs = ALIGN(max_num_sg, 4);
2205 	struct mlx5_ib_mr *mr;
2206 	u32 *in;
2207 	int err;
2208 
2209 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2210 	if (!mr)
2211 		return ERR_PTR(-ENOMEM);
2212 
2213 	in = kzalloc(inlen, GFP_KERNEL);
2214 	if (!in) {
2215 		err = -ENOMEM;
2216 		goto err_free;
2217 	}
2218 
2219 	mr->ibmr.device = pd->device;
2220 	mr->umem = NULL;
2221 
2222 	switch (mr_type) {
2223 	case IB_MR_TYPE_MEM_REG:
2224 		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
2225 		break;
2226 	case IB_MR_TYPE_SG_GAPS:
2227 		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
2228 		break;
2229 	case IB_MR_TYPE_INTEGRITY:
2230 		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
2231 						 max_num_meta_sg, in, inlen);
2232 		break;
2233 	default:
2234 		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
2235 		err = -EINVAL;
2236 	}
2237 
2238 	if (err)
2239 		goto err_free_in;
2240 
2241 	kfree(in);
2242 
2243 	return &mr->ibmr;
2244 
2245 err_free_in:
2246 	kfree(in);
2247 err_free:
2248 	kfree(mr);
2249 	return ERR_PTR(err);
2250 }
2251 
2252 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2253 			       u32 max_num_sg)
2254 {
2255 	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
2256 }
2257 
2258 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
2259 					 u32 max_num_sg, u32 max_num_meta_sg)
2260 {
2261 	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
2262 				  max_num_meta_sg);
2263 }
2264 
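/*
 * Memory windows are created as free, UMR-enabled KLM mkeys; type-2
 * windows additionally get en_rinval set.
 */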
2265 int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2266 {
2267 	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
2268 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2269 	struct mlx5_ib_mw *mw = to_mmw(ibmw);
2270 	u32 *in = NULL;
2271 	void *mkc;
2272 	int ndescs;
2273 	int err;
2274 	struct mlx5_ib_alloc_mw req = {};
2275 	struct {
2276 		__u32	comp_mask;
2277 		__u32	response_length;
2278 	} resp = {};
2279 
2280 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
2281 	if (err)
2282 		return err;
2283 
2284 	if (req.comp_mask || req.reserved1 || req.reserved2)
2285 		return -EOPNOTSUPP;
2286 
2287 	if (udata->inlen > sizeof(req) &&
2288 	    !ib_is_udata_cleared(udata, sizeof(req),
2289 				 udata->inlen - sizeof(req)))
2290 		return -EOPNOTSUPP;
2291 
2292 	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
2293 
2294 	in = kzalloc(inlen, GFP_KERNEL);
2295 	if (!in) {
2296 		err = -ENOMEM;
2297 		goto free;
2298 	}
2299 
2300 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2301 
2302 	MLX5_SET(mkc, mkc, free, 1);
2303 	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
2304 	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
2305 	MLX5_SET(mkc, mkc, umr_en, 1);
2306 	MLX5_SET(mkc, mkc, lr, 1);
2307 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
2308 	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
2309 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
2310 
2311 	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
2312 	if (err)
2313 		goto free;
2314 
2315 	mw->mmkey.type = MLX5_MKEY_MW;
2316 	ibmw->rkey = mw->mmkey.key;
2317 	mw->ndescs = ndescs;
2318 
2319 	resp.response_length =
2320 		min(offsetofend(typeof(resp), response_length), udata->outlen);
2321 	if (resp.response_length) {
2322 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
2323 		if (err)
2324 			goto free_mkey;
2325 	}
2326 
2327 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
2328 		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
2329 		if (err)
2330 			goto free_mkey;
2331 	}
2332 
2333 	kfree(in);
2334 	return 0;
2335 
2336 free_mkey:
2337 	mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
2338 free:
2339 	kfree(in);
2340 	return err;
2341 }
2342 
2343 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
2344 {
2345 	struct mlx5_ib_dev *dev = to_mdev(mw->device);
2346 	struct mlx5_ib_mw *mmw = to_mmw(mw);
2347 
2348 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
2349 	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
2350 		/*
2351 		 * pagefault_single_data_segment() may be accessing mmw
2352 		 * if the user bound an ODP MR to this MW.
2353 		 */
2354 		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
2355 
2356 	return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
2357 }
2358 
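/*
 * Report and clear the latched signature error state of an
 * integrity-enabled MR.
 */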
2359 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
2360 			    struct ib_mr_status *mr_status)
2361 {
2362 	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
2363 	int ret = 0;
2364 
2365 	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
2366 		pr_err("Invalid status check mask\n");
2367 		ret = -EINVAL;
2368 		goto done;
2369 	}
2370 
2371 	mr_status->fail_status = 0;
2372 	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
2373 		if (!mmr->sig) {
2374 			ret = -EINVAL;
2375 			pr_err("signature status check requested on a non-signature enabled MR\n");
2376 			goto done;
2377 		}
2378 
2379 		mmr->sig->sig_status_checked = true;
2380 		if (!mmr->sig->sig_err_exists)
2381 			goto done;
2382 
2383 		if (ibmr->lkey == mmr->sig->err_item.key)
2384 			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
2385 			       sizeof(mr_status->sig_err));
2386 		else {
2387 			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
2388 			mr_status->sig_err.sig_err_offset = 0;
2389 			mr_status->sig_err.key = mmr->sig->err_item.key;
2390 		}
2391 
2392 		mmr->sig->sig_err_exists = false;
2393 		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
2394 	}
2395 
2396 done:
2397 	return ret;
2398 }
2399 
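/*
 * Describe the data and metadata directly with PA addressing
 * (local_dma_lkey, no UMR); this only succeeds when each scatterlist is a
 * single DMA-contiguous entry.
 */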
2400 static int
2401 mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2402 			int data_sg_nents, unsigned int *data_sg_offset,
2403 			struct scatterlist *meta_sg, int meta_sg_nents,
2404 			unsigned int *meta_sg_offset)
2405 {
2406 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2407 	unsigned int sg_offset = 0;
2408 	int n = 0;
2409 
2410 	mr->meta_length = 0;
2411 	if (data_sg_nents == 1) {
2412 		n++;
2413 		mr->ndescs = 1;
2414 		if (data_sg_offset)
2415 			sg_offset = *data_sg_offset;
2416 		mr->data_length = sg_dma_len(data_sg) - sg_offset;
2417 		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2418 		if (meta_sg_nents == 1) {
2419 			n++;
2420 			mr->meta_ndescs = 1;
2421 			if (meta_sg_offset)
2422 				sg_offset = *meta_sg_offset;
2423 			else
2424 				sg_offset = 0;
2425 			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2426 			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2427 		}
2428 		ibmr->length = mr->data_length + mr->meta_length;
2429 	}
2430 
2431 	return n;
2432 }
2433 
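/*
 * Translate the data (and optionally metadata) scatterlists into KLM
 * descriptors, one KLM per sg entry, up to mr->max_descs entries.
 */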
2434 static int
2435 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2436 		   struct scatterlist *sgl,
2437 		   unsigned short sg_nents,
2438 		   unsigned int *sg_offset_p,
2439 		   struct scatterlist *meta_sgl,
2440 		   unsigned short meta_sg_nents,
2441 		   unsigned int *meta_sg_offset_p)
2442 {
2443 	struct scatterlist *sg = sgl;
2444 	struct mlx5_klm *klms = mr->descs;
2445 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2446 	u32 lkey = mr->ibmr.pd->local_dma_lkey;
2447 	int i, j = 0;
2448 
2449 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
2450 	mr->ibmr.length = 0;
2451 
2452 	for_each_sg(sgl, sg, sg_nents, i) {
2453 		if (unlikely(i >= mr->max_descs))
2454 			break;
2455 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
2456 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
2457 		klms[i].key = cpu_to_be32(lkey);
2458 		mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2459 
2460 		sg_offset = 0;
2461 	}
2462 
2463 	if (sg_offset_p)
2464 		*sg_offset_p = sg_offset;
2465 
2466 	mr->ndescs = i;
2467 	mr->data_length = mr->ibmr.length;
2468 
2469 	if (meta_sg_nents) {
2470 		sg = meta_sgl;
2471 		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
2472 		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
2473 			if (unlikely(i + j >= mr->max_descs))
2474 				break;
2475 			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
2476 						     sg_offset);
2477 			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
2478 							 sg_offset);
2479 			klms[i + j].key = cpu_to_be32(lkey);
2480 			mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2481 
2482 			sg_offset = 0;
2483 		}
2484 		if (meta_sg_offset_p)
2485 			*meta_sg_offset_p = sg_offset;
2486 
2487 		mr->meta_ndescs = j;
2488 		mr->meta_length = mr->ibmr.length - mr->data_length;
2489 	}
2490 
2491 	return i + j;
2492 }
2493 
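/* ib_sg_to_pages() callback: append one MTT entry for a data page. */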
2494 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
2495 {
2496 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2497 	__be64 *descs;
2498 
2499 	if (unlikely(mr->ndescs == mr->max_descs))
2500 		return -ENOMEM;
2501 
2502 	descs = mr->descs;
2503 	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2504 
2505 	return 0;
2506 }
2507 
2508 static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
2509 {
2510 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2511 	__be64 *descs;
2512 
2513 	if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
2514 		return -ENOMEM;
2515 
2516 	descs = mr->descs;
2517 	descs[mr->ndescs + mr->meta_ndescs++] =
2518 		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2519 
2520 	return 0;
2521 }
2522 
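/*
 * Map data followed by metadata onto the internal MTT pi MR: the metadata
 * pages are appended after the data pages in the same MTT table, and
 * pi_iova records where the metadata starts.
 */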
2523 static int
2524 mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2525 			 int data_sg_nents, unsigned int *data_sg_offset,
2526 			 struct scatterlist *meta_sg, int meta_sg_nents,
2527 			 unsigned int *meta_sg_offset)
2528 {
2529 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2530 	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2531 	int n;
2532 
2533 	pi_mr->ndescs = 0;
2534 	pi_mr->meta_ndescs = 0;
2535 	pi_mr->meta_length = 0;
2536 
2537 	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2538 				   pi_mr->desc_size * pi_mr->max_descs,
2539 				   DMA_TO_DEVICE);
2540 
2541 	pi_mr->ibmr.page_size = ibmr->page_size;
2542 	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
2543 			   mlx5_set_page);
2544 	if (n != data_sg_nents)
2545 		return n;
2546 
2547 	pi_mr->data_iova = pi_mr->ibmr.iova;
2548 	pi_mr->data_length = pi_mr->ibmr.length;
2549 	pi_mr->ibmr.length = pi_mr->data_length;
2550 	ibmr->length = pi_mr->data_length;
2551 
2552 	if (meta_sg_nents) {
2553 		u64 page_mask = ~((u64)ibmr->page_size - 1);
2554 		u64 iova = pi_mr->data_iova;
2555 
2556 		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
2557 				    meta_sg_offset, mlx5_set_page_pi);
2558 
2559 		pi_mr->meta_length = pi_mr->ibmr.length;
2560 		/*
2561 		 * PI address for the HW is the offset of the metadata address
2562 		 * relative to the first data page address.
2563 		 * It equals the first data page address + the size of the data
2564 		 * pages + the metadata offset within the first metadata page.
2565 		 */
2566 		pi_mr->pi_iova = (iova & page_mask) +
2567 				 pi_mr->ndescs * ibmr->page_size +
2568 				 (pi_mr->ibmr.iova & ~page_mask);
2569 		/*
2570 		 * In order to use one MTT MR for both data and metadata, we also
2571 		 * register the gaps between the end of the data and the start of
2572 		 * the metadata (the sig MR will verify that the HW accesses the
2573 		 * right addresses). This mapping is safe because we use an
2574 		 * internal mkey for the registration.
2575 		 */
2576 		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
2577 		pi_mr->ibmr.iova = iova;
2578 		ibmr->length += pi_mr->meta_length;
2579 	}
2580 
2581 	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2582 				      pi_mr->desc_size * pi_mr->max_descs,
2583 				      DMA_TO_DEVICE);
2584 
2585 	return n;
2586 }
2587 
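/*
 * Map data + metadata onto the internal KLM pi MR as a zero-based region:
 * the data occupies [0, data_length) and the metadata starts at
 * pi_iova == data_length.
 */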
2588 static int
2589 mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2590 			 int data_sg_nents, unsigned int *data_sg_offset,
2591 			 struct scatterlist *meta_sg, int meta_sg_nents,
2592 			 unsigned int *meta_sg_offset)
2593 {
2594 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2595 	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2596 	int n;
2597 
2598 	pi_mr->ndescs = 0;
2599 	pi_mr->meta_ndescs = 0;
2600 	pi_mr->meta_length = 0;
2601 
2602 	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2603 				   pi_mr->desc_size * pi_mr->max_descs,
2604 				   DMA_TO_DEVICE);
2605 
2606 	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
2607 			       meta_sg, meta_sg_nents, meta_sg_offset);
2608 
2609 	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2610 				      pi_mr->desc_size * pi_mr->max_descs,
2611 				      DMA_TO_DEVICE);
2612 
2613 	/* This is a zero-based memory region */
2614 	pi_mr->data_iova = 0;
2615 	pi_mr->ibmr.iova = 0;
2616 	pi_mr->pi_iova = pi_mr->data_length;
2617 	ibmr->length = pi_mr->ibmr.length;
2618 
2619 	return n;
2620 }
2621 
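/*
 * Map an integrity MR by trying PA, then MTT, then KLM descriptors, in
 * decreasing order of HW efficiency.
 */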
2622 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2623 			 int data_sg_nents, unsigned int *data_sg_offset,
2624 			 struct scatterlist *meta_sg, int meta_sg_nents,
2625 			 unsigned int *meta_sg_offset)
2626 {
2627 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2628 	struct mlx5_ib_mr *pi_mr = NULL;
2629 	int n;
2630 
2631 	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
2632 
2633 	mr->ndescs = 0;
2634 	mr->data_length = 0;
2635 	mr->data_iova = 0;
2636 	mr->meta_ndescs = 0;
2637 	mr->pi_iova = 0;
2638 	/*
2639 	 * As a performance optimization, if possible, there is no need to
2640 	 * perform a UMR operation to register the data/metadata buffers.
2641 	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
2642 	 * Fall back to UMR only in case of a failure.
2643 	 */
2644 	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2645 				    data_sg_offset, meta_sg, meta_sg_nents,
2646 				    meta_sg_offset);
2647 	if (n == data_sg_nents + meta_sg_nents)
2648 		goto out;
2649 	/*
2650 	 * As a performance optimization, if possible, there is no need to map
2651 	 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
2652 	 * descriptors and fall back to KLM only in case of a failure.
2653 	 * It's more efficient for the HW to work with MTT descriptors
2654 	 * (especially under high load).
2655 	 * Use KLM (indirect access) only if it's mandatory.
2656 	 */
2657 	pi_mr = mr->mtt_mr;
2658 	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2659 				     data_sg_offset, meta_sg, meta_sg_nents,
2660 				     meta_sg_offset);
2661 	if (n == data_sg_nents + meta_sg_nents)
2662 		goto out;
2663 
2664 	pi_mr = mr->klm_mr;
2665 	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2666 				     data_sg_offset, meta_sg, meta_sg_nents,
2667 				     meta_sg_offset);
2668 	if (unlikely(n != data_sg_nents + meta_sg_nents))
2669 		return -ENOMEM;
2670 
2671 out:
2672 	/* This is a zero-based memory region */
2673 	ibmr->iova = 0;
2674 	mr->pi_mr = pi_mr;
2675 	if (pi_mr)
2676 		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
2677 	else
2678 		ibmr->sig_attrs->meta_length = mr->meta_length;
2679 
2680 	return 0;
2681 }
2682 
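/*
 * Standard ib_map_mr_sg() implementation: KLM-based MRs go through
 * mlx5_ib_sg_to_klms(), everything else through ib_sg_to_pages() with MTT
 * entries.
 */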
2683 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
2684 		      unsigned int *sg_offset)
2685 {
2686 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
2687 	int n;
2688 
2689 	mr->ndescs = 0;
2690 
2691 	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2692 				   mr->desc_size * mr->max_descs,
2693 				   DMA_TO_DEVICE);
2694 
2695 	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2696 		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2697 				       NULL);
2698 	else
2699 		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
2700 				mlx5_set_page);
2701 
2702 	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2703 				      mr->desc_size * mr->max_descs,
2704 				      DMA_TO_DEVICE);
2705 
2706 	return n;
2707 }
2708