xref: /openbmc/linux/drivers/infiniband/hw/mlx5/odp.c (revision 5ff32883)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <rdma/ib_umem.h>
34 #include <rdma/ib_umem_odp.h>
35 #include <linux/kernel.h>
36 
37 #include "mlx5_ib.h"
38 #include "cmd.h"
39 
40 #include <linux/mlx5/eq.h>
41 
42 /* Contains the details of a pagefault. */
43 struct mlx5_pagefault {
44 	u32			bytes_committed;
45 	u32			token;
46 	u8			event_subtype;
47 	u8			type;
48 	union {
49 		/* Page fault details for an initiator or send-message-responder WQE. */
50 		struct {
51 			/* Received packet size, only valid for responders. */
52 			u32	packet_size;
53 			/*
54 			 * Number of the resource holding the WQE; depends on the fault type.
55 			 */
56 			u32	wq_num;
57 			/*
58 			 * WQE index. Refers to either the send queue or
59 			 * receive queue, according to event_subtype.
60 			 */
61 			u16	wqe_index;
62 		} wqe;
63 		/* RDMA responder pagefault details */
64 		struct {
65 			u32	r_key;
66 			/*
67 			 * Received packet size; the minimal page fault
68 			 * resolution required to make forward progress.
69 			 */
70 			u32	packet_size;
71 			u32	rdma_op_len;
72 			u64	rdma_va;
73 		} rdma;
74 	};
75 
76 	struct mlx5_ib_pf_eq	*eq;
77 	struct work_struct	work;
78 };
79 
80 #define MAX_PREFETCH_LEN (4*1024*1024U)
81 
82 /* Timeout in ms to wait for an active mmu notifier to complete when handling
83  * a pagefault. */
84 #define MMU_NOTIFIER_TIMEOUT 1000
85 
86 #define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
87 #define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
88 #define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
89 #define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
90 #define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
91 
92 #define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
93 
94 static u64 mlx5_imr_ksm_entries;
95 
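/*
 * Return nonzero when @odp is a live leaf of the implicit MR @parent, i.e.
 * its private MR points back at @parent and it has not been marked dying.
 */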
96 static int check_parent(struct ib_umem_odp *odp,
97 			       struct mlx5_ib_mr *parent)
98 {
99 	struct mlx5_ib_mr *mr = odp->private;
100 
101 	return mr && mr->parent == parent && !odp->dying;
102 }
103 
104 struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
105 {
106 	if (WARN_ON(!mr || !mr->umem || !mr->umem->is_odp))
107 		return NULL;
108 
109 	return to_ib_umem_odp(mr->umem)->per_mm;
110 }
111 
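/*
 * Walk the per-mm interval tree to the next umem that is a live leaf of the
 * same implicit parent MR, or return NULL if there is none.
 */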
112 static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
113 {
114 	struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
115 	struct ib_ucontext_per_mm *per_mm = odp->per_mm;
116 	struct rb_node *rb;
117 
118 	down_read(&per_mm->umem_rwsem);
119 	while (1) {
120 		rb = rb_next(&odp->interval_tree.rb);
121 		if (!rb)
122 			goto not_found;
123 		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
124 		if (check_parent(odp, parent))
125 			goto end;
126 	}
127 not_found:
128 	odp = NULL;
129 end:
130 	up_read(&per_mm->umem_rwsem);
131 	return odp;
132 }
133 
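/*
 * Find the first live leaf of @parent whose umem overlaps the requested
 * range, walking forward in the interval tree if the first overlapping umem
 * belongs to a different MR.
 */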
134 static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
135 				      struct mlx5_ib_mr *parent)
136 {
137 	struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
138 	struct ib_umem_odp *odp;
139 	struct rb_node *rb;
140 
141 	down_read(&per_mm->umem_rwsem);
142 	odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
143 	if (!odp)
144 		goto end;
145 
146 	while (1) {
147 		if (check_parent(odp, parent))
148 			goto end;
149 		rb = rb_next(&odp->interval_tree.rb);
150 		if (!rb)
151 			goto not_found;
152 		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
153 		if (ib_umem_start(&odp->umem) > start + length)
154 			goto not_found;
155 	}
156 not_found:
157 	odp = NULL;
158 end:
159 	up_read(&per_mm->umem_rwsem);
160 	return odp;
161 }
162 
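/*
 * Fill @nentries KLM descriptors, starting at @offset, for the implicit
 * (indirect) MR @mr.  Entries backed by an existing leaf get the child's
 * mkey; holes and ZAP requests are filled with the device's null mkey.
 */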
163 void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
164 			   size_t nentries, struct mlx5_ib_mr *mr, int flags)
165 {
166 	struct ib_pd *pd = mr->ibmr.pd;
167 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
168 	struct ib_umem_odp *odp;
169 	unsigned long va;
170 	int i;
171 
172 	if (flags & MLX5_IB_UPD_XLT_ZAP) {
173 		for (i = 0; i < nentries; i++, pklm++) {
174 			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
175 			pklm->key = cpu_to_be32(dev->null_mkey);
176 			pklm->va = 0;
177 		}
178 		return;
179 	}
180 
181 	odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
182 			 nentries * MLX5_IMR_MTT_SIZE, mr);
183 
184 	for (i = 0; i < nentries; i++, pklm++) {
185 		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
186 		va = (offset + i) * MLX5_IMR_MTT_SIZE;
187 		if (odp && odp->umem.address == va) {
188 			struct mlx5_ib_mr *mtt = odp->private;
189 
190 			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
191 			odp = odp_next(odp);
192 		} else {
193 			pklm->key = cpu_to_be32(dev->null_mkey);
194 		}
195 		mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
196 			    i, va, be32_to_cpu(pklm->key));
197 	}
198 }
199 
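/*
 * Deferred teardown of an implicit MR leaf: detach it from its parent, wait
 * for concurrent page-fault handlers (SRCU), release the umem, refresh the
 * parent's KLM entry for this range if the parent is still live, and free
 * the child MR back to the cache.
 */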
200 static void mr_leaf_free_action(struct work_struct *work)
201 {
202 	struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
203 	int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT;
204 	struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
205 
206 	mr->parent = NULL;
207 	synchronize_srcu(&mr->dev->mr_srcu);
208 
209 	ib_umem_release(&odp->umem);
210 	if (imr->live)
211 		mlx5_ib_update_xlt(imr, idx, 1, 0,
212 				   MLX5_IB_UPD_XLT_INDIRECT |
213 				   MLX5_IB_UPD_XLT_ATOMIC);
214 	mlx5_mr_cache_free(mr->dev, mr);
215 
216 	if (atomic_dec_and_test(&imr->num_leaf_free))
217 		wake_up(&imr->q_leaf_free);
218 }
219 
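/*
 * MMU notifier callback for an ODP umem: zap the HW translation entries
 * covering the invalidated range, unmap the DMA pages, and schedule the leaf
 * for freeing if the umem is now empty and belongs to an implicit MR.
 */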
220 void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
221 			      unsigned long end)
222 {
223 	struct mlx5_ib_mr *mr;
224 	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
225 				    sizeof(struct mlx5_mtt)) - 1;
226 	u64 idx = 0, blk_start_idx = 0;
227 	struct ib_umem *umem;
228 	int in_block = 0;
229 	u64 addr;
230 
231 	if (!umem_odp) {
232 		pr_err("invalidation called on NULL umem or non-ODP umem\n");
233 		return;
234 	}
235 	umem = &umem_odp->umem;
236 
237 	mr = umem_odp->private;
238 
239 	if (!mr || !mr->ibmr.pd)
240 		return;
241 
242 	start = max_t(u64, ib_umem_start(umem), start);
243 	end = min_t(u64, ib_umem_end(umem), end);
244 
245 	/*
246 	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
247 	 * while we are doing the invalidation, no page fault will attempt to
248 	 * overwrite the same MTTs.  Concurrent invalidations might race us,
249 	 * but they will write 0s as well, so no difference in the end result.
250 	 */
251 
252 	for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
253 		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
254 		/*
255 		 * Strive to write the MTTs in chunks, but avoid overwriting
256 		 * non-existent MTTs. The heuristic here could be improved to
257 		 * estimate the cost of another UMR vs. the cost of a bigger
258 		 * UMR.
259 		 */
260 		if (umem_odp->dma_list[idx] &
261 		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
262 			if (!in_block) {
263 				blk_start_idx = idx;
264 				in_block = 1;
265 			}
266 		} else {
267 			u64 umr_offset = idx & umr_block_mask;
268 
269 			if (in_block && umr_offset == 0) {
270 				mlx5_ib_update_xlt(mr, blk_start_idx,
271 						   idx - blk_start_idx, 0,
272 						   MLX5_IB_UPD_XLT_ZAP |
273 						   MLX5_IB_UPD_XLT_ATOMIC);
274 				in_block = 0;
275 			}
276 		}
277 	}
278 	if (in_block)
279 		mlx5_ib_update_xlt(mr, blk_start_idx,
280 				   idx - blk_start_idx + 1, 0,
281 				   MLX5_IB_UPD_XLT_ZAP |
282 				   MLX5_IB_UPD_XLT_ATOMIC);
283 	/*
284 	 * We are now sure that the device will not access the
285 	 * memory. We can safely unmap it, and mark it as dirty if
286 	 * needed.
287 	 */
288 
289 	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
290 
291 	if (unlikely(!umem->npages && mr->parent &&
292 		     !umem_odp->dying)) {
293 		WRITE_ONCE(umem_odp->dying, 1);
294 		atomic_inc(&mr->parent->num_leaf_free);
295 		schedule_work(&umem_odp->work);
296 	}
297 }
298 
299 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
300 {
301 	struct ib_odp_caps *caps = &dev->odp_caps;
302 
303 	memset(caps, 0, sizeof(*caps));
304 
305 	if (!MLX5_CAP_GEN(dev->mdev, pg))
306 		return;
307 
308 	caps->general_caps = IB_ODP_SUPPORT;
309 
310 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
311 		dev->odp_max_size = U64_MAX;
312 	else
313 		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
314 
315 	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
316 		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
317 
318 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
319 		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
320 
321 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
322 		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
323 
324 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
325 		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
326 
327 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
328 		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
329 
330 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
331 		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
332 
333 	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
334 	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
335 	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
336 		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
337 
338 	return;
339 }
340 
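/*
 * Tell the device to resume the faulting WQ, optionally flagging the fault
 * as unresolvable so the HW completes it with an error.
 */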
341 static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
342 				      struct mlx5_pagefault *pfault,
343 				      int error)
344 {
345 	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
346 		     pfault->wqe.wq_num : pfault->token;
347 	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
348 	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = { };
349 	int err;
350 
351 	MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
352 	MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
353 	MLX5_SET(page_fault_resume_in, in, token, pfault->token);
354 	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
355 	MLX5_SET(page_fault_resume_in, in, error, !!error);
356 
357 	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
358 	if (err)
359 		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
360 			    wq_num, err);
361 }
362 
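/*
 * Allocate an MR from the cache for the implicit-ODP tree: a KSM-based
 * parent when @ksm is set, otherwise an MTT-based leaf.  The translation
 * table is initially zapped and the mkey is enabled.
 */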
363 static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
364 					    struct ib_umem *umem,
365 					    bool ksm, int access_flags)
366 {
367 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
368 	struct mlx5_ib_mr *mr;
369 	int err;
370 
371 	mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
372 					    MLX5_IMR_MTT_CACHE_ENTRY);
373 
374 	if (IS_ERR(mr))
375 		return mr;
376 
377 	mr->ibmr.pd = pd;
378 
379 	mr->dev = dev;
380 	mr->access_flags = access_flags;
381 	mr->mmkey.iova = 0;
382 	mr->umem = umem;
383 
384 	if (ksm) {
385 		err = mlx5_ib_update_xlt(mr, 0,
386 					 mlx5_imr_ksm_entries,
387 					 MLX5_KSM_PAGE_SHIFT,
388 					 MLX5_IB_UPD_XLT_INDIRECT |
389 					 MLX5_IB_UPD_XLT_ZAP |
390 					 MLX5_IB_UPD_XLT_ENABLE);
391 
392 	} else {
393 		err = mlx5_ib_update_xlt(mr, 0,
394 					 MLX5_IMR_MTT_ENTRIES,
395 					 PAGE_SHIFT,
396 					 MLX5_IB_UPD_XLT_ZAP |
397 					 MLX5_IB_UPD_XLT_ENABLE |
398 					 MLX5_IB_UPD_XLT_ATOMIC);
399 	}
400 
401 	if (err)
402 		goto fail;
403 
404 	mr->ibmr.lkey = mr->mmkey.key;
405 	mr->ibmr.rkey = mr->mmkey.key;
406 
407 	mr->live = 1;
408 
409 	mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
410 		    mr->mmkey.key, dev->mdev, mr);
411 
412 	return mr;
413 
414 fail:
415 	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
416 	mlx5_mr_cache_free(dev, mr);
417 
418 	return ERR_PTR(err);
419 }
420 
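/*
 * For an implicit (parent) MR, find or create the child umems/MRs covering
 * [io_virt, io_virt + bcnt) and, if any were created, write their entries
 * into the parent's KLM table.  Returns the first child umem of the range.
 */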
421 static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
422 						u64 io_virt, size_t bcnt)
423 {
424 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
425 	struct ib_umem_odp *odp, *result = NULL;
426 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
427 	u64 addr = io_virt & MLX5_IMR_MTT_MASK;
428 	int nentries = 0, start_idx = 0, ret;
429 	struct mlx5_ib_mr *mtt;
430 
431 	mutex_lock(&odp_mr->umem_mutex);
432 	odp = odp_lookup(addr, 1, mr);
433 
434 	mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
435 		    io_virt, bcnt, addr, odp);
436 
437 next_mr:
438 	if (likely(odp)) {
439 		if (nentries)
440 			nentries++;
441 	} else {
442 		odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
443 					MLX5_IMR_MTT_SIZE);
444 		if (IS_ERR(odp)) {
445 			mutex_unlock(&odp_mr->umem_mutex);
446 			return ERR_CAST(odp);
447 		}
448 
449 		mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
450 					mr->access_flags);
451 		if (IS_ERR(mtt)) {
452 			mutex_unlock(&odp_mr->umem_mutex);
453 			ib_umem_release(&odp->umem);
454 			return ERR_CAST(mtt);
455 		}
456 
457 		odp->private = mtt;
458 		mtt->umem = &odp->umem;
459 		mtt->mmkey.iova = addr;
460 		mtt->parent = mr;
461 		INIT_WORK(&odp->work, mr_leaf_free_action);
462 
463 		if (!nentries)
464 			start_idx = addr >> MLX5_IMR_MTT_SHIFT;
465 		nentries++;
466 	}
467 
468 	/* Return the first odp if the region is not covered by a single one */
469 	if (likely(!result))
470 		result = odp;
471 
472 	addr += MLX5_IMR_MTT_SIZE;
473 	if (unlikely(addr < io_virt + bcnt)) {
474 		odp = odp_next(odp);
475 		if (odp && odp->umem.address != addr)
476 			odp = NULL;
477 		goto next_mr;
478 	}
479 
480 	if (unlikely(nentries)) {
481 		ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
482 					 MLX5_IB_UPD_XLT_INDIRECT |
483 					 MLX5_IB_UPD_XLT_ATOMIC);
484 		if (ret) {
485 			mlx5_ib_err(dev, "Failed to update PAS\n");
486 			result = ERR_PTR(ret);
487 		}
488 	}
489 
490 	mutex_unlock(&odp_mr->umem_mutex);
491 	return result;
492 }
493 
494 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
495 					     int access_flags)
496 {
497 	struct ib_ucontext *ctx = pd->ibpd.uobject->context;
498 	struct mlx5_ib_mr *imr;
499 	struct ib_umem *umem;
500 
501 	umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0);
502 	if (IS_ERR(umem))
503 		return ERR_CAST(umem);
504 
505 	imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
506 	if (IS_ERR(imr)) {
507 		ib_umem_release(umem);
508 		return ERR_CAST(imr);
509 	}
510 
511 	imr->umem = umem;
512 	init_waitqueue_head(&imr->q_leaf_free);
513 	atomic_set(&imr->num_leaf_free, 0);
514 
515 	return imr;
516 }
517 
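/*
 * Per-umem callback used when tearing down an implicit MR: unmap the leaf's
 * DMA pages and schedule its deferred free (unless it is already dying).
 */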
518 static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
519 			void *cookie)
520 {
521 	struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
522 	struct ib_umem *umem = &umem_odp->umem;
523 
524 	if (mr->parent != imr)
525 		return 0;
526 
527 	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
528 				    ib_umem_end(umem));
529 
530 	if (umem_odp->dying)
531 		return 0;
532 
533 	WRITE_ONCE(umem_odp->dying, 1);
534 	atomic_inc(&imr->num_leaf_free);
535 	schedule_work(&umem_odp->work);
536 
537 	return 0;
538 }
539 
540 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
541 {
542 	struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
543 
544 	down_read(&per_mm->umem_rwsem);
545 	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
546 				      mr_leaf_free, true, imr);
547 	up_read(&per_mm->umem_rwsem);
548 
549 	wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
550 }
551 
552 #define MLX5_PF_FLAGS_PREFETCH  BIT(0)
553 #define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
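/*
 * Fault in [io_virt, io_virt + bcnt) for a single (possibly implicit) MR:
 * map the user pages, update the mkey's translation entries, and report how
 * many bytes were mapped.  For an implicit MR the relevant leaf children are
 * created on demand first.  Returns the number of pages mapped or a negative
 * errno; -EAGAIN designates a temporary race with an invalidation.
 */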
554 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
555 			u64 io_virt, size_t bcnt, u32 *bytes_mapped,
556 			u32 flags)
557 {
558 	int npages = 0, current_seq, page_shift, ret, np;
559 	bool implicit = false;
560 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
561 	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
562 	bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
563 	u64 access_mask = ODP_READ_ALLOWED_BIT;
564 	u64 start_idx, page_mask;
565 	struct ib_umem_odp *odp;
566 	size_t size;
567 
568 	if (!odp_mr->page_list) {
569 		odp = implicit_mr_get_data(mr, io_virt, bcnt);
570 
571 		if (IS_ERR(odp))
572 			return PTR_ERR(odp);
573 		mr = odp->private;
574 		implicit = true;
575 	} else {
576 		odp = odp_mr;
577 	}
578 
579 next_mr:
580 	size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt);
581 
582 	page_shift = mr->umem->page_shift;
583 	page_mask = ~(BIT(page_shift) - 1);
584 	start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
585 
586 	if (prefetch && !downgrade && !mr->umem->writable) {
587 		/* A prefetch with write access (i.e. not downgraded
588 		 * to read) requires a writable MR.
589 		 */
590 		ret = -EINVAL;
591 		goto out;
592 	}
593 
594 	if (mr->umem->writable && !downgrade)
595 		access_mask |= ODP_WRITE_ALLOWED_BIT;
596 
597 	current_seq = READ_ONCE(odp->notifiers_seq);
598 	/*
599 	 * Ensure the sequence number is valid for some time before we call
600 	 * gup.
601 	 */
602 	smp_rmb();
603 
604 	ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
605 					access_mask, current_seq);
606 
607 	if (ret < 0)
608 		goto out;
609 
610 	np = ret;
611 
612 	mutex_lock(&odp->umem_mutex);
613 	if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
614 					current_seq)) {
615 		/*
616 		 * No need to check whether the MTTs really belong to
617 		 * this MR, since ib_umem_odp_map_dma_pages already
618 		 * checks this.
619 		 */
620 		ret = mlx5_ib_update_xlt(mr, start_idx, np,
621 					 page_shift, MLX5_IB_UPD_XLT_ATOMIC);
622 	} else {
623 		ret = -EAGAIN;
624 	}
625 	mutex_unlock(&odp->umem_mutex);
626 
627 	if (ret < 0) {
628 		if (ret != -EAGAIN)
629 			mlx5_ib_err(dev, "Failed to update mkey page tables\n");
630 		goto out;
631 	}
632 
633 	if (bytes_mapped) {
634 		u32 new_mappings = (np << page_shift) -
635 			(io_virt - round_down(io_virt, 1 << page_shift));
636 		*bytes_mapped += min_t(u32, new_mappings, size);
637 	}
638 
639 	npages += np << (page_shift - PAGE_SHIFT);
640 	bcnt -= size;
641 
642 	if (unlikely(bcnt)) {
643 		struct ib_umem_odp *next;
644 
645 		io_virt += size;
646 		next = odp_next(odp);
647 		if (unlikely(!next || next->umem.address != io_virt)) {
648 			mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
649 				    io_virt, next);
650 			return -EAGAIN;
651 		}
652 		odp = next;
653 		mr = odp->private;
654 		goto next_mr;
655 	}
656 
657 	return npages;
658 
659 out:
660 	if (ret == -EAGAIN) {
661 		if (implicit || !odp->dying) {
662 			unsigned long timeout =
663 				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
664 
665 			if (!wait_for_completion_timeout(
666 					&odp->notifier_completion,
667 					timeout)) {
668 				mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
669 					     current_seq, odp->notifiers_seq, odp->notifiers_count);
670 			}
671 		} else {
672 			/* The MR is being killed, kill the QP as well. */
673 			ret = -EFAULT;
674 		}
675 	}
676 
677 	return ret;
678 }
679 
680 struct pf_frame {
681 	struct pf_frame *next;
682 	u32 key;
683 	u64 io_virt;
684 	size_t bcnt;
685 	int depth;
686 };
687 
688 /*
689  * Handle a single data segment in a page-fault WQE or RDMA region.
690  *
691  * Returns the number of OS pages retrieved on success. The caller may continue to
692  * the next data segment.
693  * Can return the following error codes:
694  * -EAGAIN to designate a temporary error. The caller will abort handling the
695  *  page fault and resolve it.
696  * -EFAULT when there's an error mapping the requested pages. The caller will
697  *  abort the page fault handling.
698  */
699 static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, u32 key,
700 					 u64 io_virt, size_t bcnt,
701 					 u32 *bytes_committed,
702 					 u32 *bytes_mapped, u32 flags)
703 {
704 	int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
705 	bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
706 	struct pf_frame *head = NULL, *frame;
707 	struct mlx5_core_mkey *mmkey;
708 	struct mlx5_ib_mw *mw;
709 	struct mlx5_ib_mr *mr;
710 	struct mlx5_klm *pklm;
711 	u32 *out = NULL;
712 	size_t offset;
713 
714 	srcu_key = srcu_read_lock(&dev->mr_srcu);
715 
716 	io_virt += *bytes_committed;
717 	bcnt -= *bytes_committed;
718 
719 next_mr:
720 	mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
721 	if (!mmkey || mmkey->key != key) {
722 		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
723 		ret = -EFAULT;
724 		goto srcu_unlock;
725 	}
726 
727 	if (prefetch && mmkey->type != MLX5_MKEY_MR) {
728 		mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
729 		ret = -EINVAL;
730 		goto srcu_unlock;
731 	}
732 
733 	switch (mmkey->type) {
734 	case MLX5_MKEY_MR:
735 		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
736 		if (!mr->live || !mr->ibmr.pd) {
737 			mlx5_ib_dbg(dev, "got dead MR\n");
738 			ret = -EFAULT;
739 			goto srcu_unlock;
740 		}
741 
742 		if (prefetch && !mr->umem->is_odp) {
743 			ret = -EINVAL;
744 			goto srcu_unlock;
745 		}
746 
747 		if (!mr->umem->is_odp) {
748 			mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
749 				    key);
750 			if (bytes_mapped)
751 				*bytes_mapped += bcnt;
752 			ret = 0;
753 			goto srcu_unlock;
754 		}
755 
756 		ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
757 		if (ret < 0)
758 			goto srcu_unlock;
759 
760 		npages += ret;
761 		ret = 0;
762 		break;
763 
764 	case MLX5_MKEY_MW:
765 		mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
766 
767 		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
768 			mlx5_ib_dbg(dev, "indirection level exceeded\n");
769 			ret = -EFAULT;
770 			goto srcu_unlock;
771 		}
772 
773 		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
774 			sizeof(*pklm) * (mw->ndescs - 2);
775 
776 		if (outlen > cur_outlen) {
777 			kfree(out);
778 			out = kzalloc(outlen, GFP_KERNEL);
779 			if (!out) {
780 				ret = -ENOMEM;
781 				goto srcu_unlock;
782 			}
783 			cur_outlen = outlen;
784 		}
785 
786 		pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
787 						       bsf0_klm0_pas_mtt0_1);
788 
789 		ret = mlx5_core_query_mkey(dev->mdev, &mw->mmkey, out, outlen);
790 		if (ret)
791 			goto srcu_unlock;
792 
793 		offset = io_virt - MLX5_GET64(query_mkey_out, out,
794 					      memory_key_mkey_entry.start_addr);
795 
796 		for (i = 0; bcnt && i < mw->ndescs; i++, pklm++) {
797 			if (offset >= be32_to_cpu(pklm->bcount)) {
798 				offset -= be32_to_cpu(pklm->bcount);
799 				continue;
800 			}
801 
802 			frame = kzalloc(sizeof(*frame), GFP_KERNEL);
803 			if (!frame) {
804 				ret = -ENOMEM;
805 				goto srcu_unlock;
806 			}
807 
808 			frame->key = be32_to_cpu(pklm->key);
809 			frame->io_virt = be64_to_cpu(pklm->va) + offset;
810 			frame->bcnt = min_t(size_t, bcnt,
811 					    be32_to_cpu(pklm->bcount) - offset);
812 			frame->depth = depth + 1;
813 			frame->next = head;
814 			head = frame;
815 
816 			bcnt -= frame->bcnt;
817 			offset = 0;
818 		}
819 		break;
820 
821 	default:
822 		mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
823 		ret = -EFAULT;
824 		goto srcu_unlock;
825 	}
826 
827 	if (head) {
828 		frame = head;
829 		head = frame->next;
830 
831 		key = frame->key;
832 		io_virt = frame->io_virt;
833 		bcnt = frame->bcnt;
834 		depth = frame->depth;
835 		kfree(frame);
836 
837 		goto next_mr;
838 	}
839 
840 srcu_unlock:
841 	while (head) {
842 		frame = head;
843 		head = frame->next;
844 		kfree(frame);
845 	}
846 	kfree(out);
847 
848 	srcu_read_unlock(&dev->mr_srcu, srcu_key);
849 	*bytes_committed = 0;
850 	return ret ? ret : npages;
851 }
852 
853 /**
854  * pagefault_data_segments() - Parse a series of data segments for page fault handling.
855  *
856  * @qp: the QP on which the fault occurred.
857  * @pfault: contains page fault information.
858  * @wqe: points at the first data segment in the WQE.
859  * @wqe_end: points after the end of the WQE.
860  * @bytes_mapped: receives the number of bytes that the function was able to
861  *                map. This allows the caller to decide intelligently whether
862  *                enough memory was mapped to resolve the page fault
863  *                successfully (e.g. enough for the next MTU, or the entire
864  *                WQE).
865  * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
866  *                   the committed bytes).
867  *
868  * Returns the number of pages loaded if positive, zero for an empty WQE, or a
869  * negative error code.
870  */
871 static int pagefault_data_segments(struct mlx5_ib_dev *dev,
872 				   struct mlx5_pagefault *pfault,
873 				   struct mlx5_ib_qp *qp, void *wqe,
874 				   void *wqe_end, u32 *bytes_mapped,
875 				   u32 *total_wqe_bytes, int receive_queue)
876 {
877 	int ret = 0, npages = 0;
878 	u64 io_virt;
879 	u32 key;
880 	u32 byte_count;
881 	size_t bcnt;
882 	int inline_segment;
883 
884 	/* Skip SRQ next-WQE segment. */
885 	if (receive_queue && qp->ibqp.srq)
886 		wqe += sizeof(struct mlx5_wqe_srq_next_seg);
887 
888 	if (bytes_mapped)
889 		*bytes_mapped = 0;
890 	if (total_wqe_bytes)
891 		*total_wqe_bytes = 0;
892 
893 	while (wqe < wqe_end) {
894 		struct mlx5_wqe_data_seg *dseg = wqe;
895 
896 		io_virt = be64_to_cpu(dseg->addr);
897 		key = be32_to_cpu(dseg->lkey);
898 		byte_count = be32_to_cpu(dseg->byte_count);
899 		inline_segment = !!(byte_count &  MLX5_INLINE_SEG);
900 		bcnt	       = byte_count & ~MLX5_INLINE_SEG;
901 
902 		if (inline_segment) {
903 			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
904 			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
905 				     16);
906 		} else {
907 			wqe += sizeof(*dseg);
908 		}
909 
910 		/* End of the receive WQE's sg list. */
911 		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
912 		    io_virt == 0)
913 			break;
914 
915 		if (!inline_segment && total_wqe_bytes) {
916 			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
917 					pfault->bytes_committed);
918 		}
919 
920 		/* A zero length data segment designates a length of 2GB. */
921 		if (bcnt == 0)
922 			bcnt = 1U << 31;
923 
924 		if (inline_segment || bcnt <= pfault->bytes_committed) {
925 			pfault->bytes_committed -=
926 				min_t(size_t, bcnt,
927 				      pfault->bytes_committed);
928 			continue;
929 		}
930 
931 		ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
932 						    &pfault->bytes_committed,
933 						    bytes_mapped, 0);
934 		if (ret < 0)
935 			break;
936 		npages += ret;
937 	}
938 
939 	return ret < 0 ? ret : npages;
940 }
941 
942 static const u32 mlx5_ib_odp_opcode_cap[] = {
943 	[MLX5_OPCODE_SEND]	       = IB_ODP_SUPPORT_SEND,
944 	[MLX5_OPCODE_SEND_IMM]	       = IB_ODP_SUPPORT_SEND,
945 	[MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
946 	[MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
947 	[MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
948 	[MLX5_OPCODE_RDMA_READ]	       = IB_ODP_SUPPORT_READ,
949 	[MLX5_OPCODE_ATOMIC_CS]	       = IB_ODP_SUPPORT_ATOMIC,
950 	[MLX5_OPCODE_ATOMIC_FA]	       = IB_ODP_SUPPORT_ATOMIC,
951 };
952 
953 /*
954  * Parse initiator WQE. Advances the wqe pointer to point at the
955  * scatter-gather list, and sets wqe_end to the end of the WQE.
956  */
957 static int mlx5_ib_mr_initiator_pfault_handler(
958 	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
959 	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
960 {
961 	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
962 	u16 wqe_index = pfault->wqe.wqe_index;
963 	u32 transport_caps;
964 	struct mlx5_base_av *av;
965 	unsigned ds, opcode;
966 #if defined(DEBUG)
967 	u32 ctrl_wqe_index, ctrl_qpn;
968 #endif
969 	u32 qpn = qp->trans_qp.base.mqp.qpn;
970 
971 	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
972 	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
973 		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
974 			    ds, wqe_length);
975 		return -EFAULT;
976 	}
977 
978 	if (ds == 0) {
979 		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
980 			    wqe_index, qpn);
981 		return -EFAULT;
982 	}
983 
984 #if defined(DEBUG)
985 	ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
986 			MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
987 			MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
988 	if (wqe_index != ctrl_wqe_index) {
989 		mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
990 			    wqe_index, qpn,
991 			    ctrl_wqe_index);
992 		return -EFAULT;
993 	}
994 
995 	ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
996 		MLX5_WQE_CTRL_QPN_SHIFT;
997 	if (qpn != ctrl_qpn) {
998 		mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
999 			    wqe_index, qpn,
1000 			    ctrl_qpn);
1001 		return -EFAULT;
1002 	}
1003 #endif /* DEBUG */
1004 
1005 	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
1006 	*wqe += sizeof(*ctrl);
1007 
1008 	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
1009 		 MLX5_WQE_CTRL_OPCODE_MASK;
1010 
1011 	switch (qp->ibqp.qp_type) {
1012 	case IB_QPT_RC:
1013 		transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
1014 		break;
1015 	case IB_QPT_UD:
1016 		transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
1017 		break;
1018 	default:
1019 		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
1020 			    qp->ibqp.qp_type);
1021 		return -EFAULT;
1022 	}
1023 
1024 	if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
1025 		     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
1026 		mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
1027 			    opcode);
1028 		return -EFAULT;
1029 	}
1030 
1031 	if (qp->ibqp.qp_type != IB_QPT_RC) {
1032 		av = *wqe;
1033 		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
1034 			*wqe += sizeof(struct mlx5_av);
1035 		else
1036 			*wqe += sizeof(struct mlx5_base_av);
1037 	}
1038 
1039 	switch (opcode) {
1040 	case MLX5_OPCODE_RDMA_WRITE:
1041 	case MLX5_OPCODE_RDMA_WRITE_IMM:
1042 	case MLX5_OPCODE_RDMA_READ:
1043 		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
1044 		break;
1045 	case MLX5_OPCODE_ATOMIC_CS:
1046 	case MLX5_OPCODE_ATOMIC_FA:
1047 		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
1048 		*wqe += sizeof(struct mlx5_wqe_atomic_seg);
1049 		break;
1050 	}
1051 
1052 	return 0;
1053 }
1054 
1055 /*
1056  * Parse responder WQE. Advances the wqe pointer to point at the
1057  * scatter-gather list, and sets wqe_end to the end of the WQE.
1058  */
1059 static int mlx5_ib_mr_responder_pfault_handler(
1060 	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
1061 	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
1062 {
1063 	struct mlx5_ib_wq *wq = &qp->rq;
1064 	int wqe_size = 1 << wq->wqe_shift;
1065 
1066 	if (qp->ibqp.srq) {
1067 		mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
1068 		return -EFAULT;
1069 	}
1070 
1071 	if (qp->wq_sig) {
1072 		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
1073 		return -EFAULT;
1074 	}
1075 
1076 	if (wqe_size > wqe_length) {
1077 		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1078 		return -EFAULT;
1079 	}
1080 
1081 	switch (qp->ibqp.qp_type) {
1082 	case IB_QPT_RC:
1083 		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
1084 		      IB_ODP_SUPPORT_RECV))
1085 			goto invalid_transport_or_opcode;
1086 		break;
1087 	default:
1088 invalid_transport_or_opcode:
1089 		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
1090 			    qp->ibqp.qp_type);
1091 		return -EFAULT;
1092 	}
1093 
1094 	*wqe_end = *wqe + wqe_size;
1095 
1096 	return 0;
1097 }
1098 
1099 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
1100 						       u32 wq_num, int pf_type)
1101 {
1102 	enum mlx5_res_type res_type;
1103 
1104 	switch (pf_type) {
1105 	case MLX5_WQE_PF_TYPE_RMP:
1106 		res_type = MLX5_RES_SRQ;
1107 		break;
1108 	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
1109 	case MLX5_WQE_PF_TYPE_RESP:
1110 	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
1111 		res_type = MLX5_RES_QP;
1112 		break;
1113 	default:
1114 		return NULL;
1115 	}
1116 
1117 	return mlx5_core_res_hold(dev->mdev, wq_num, res_type);
1118 }
1119 
1120 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
1121 {
1122 	struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
1123 
1124 	return to_mibqp(mqp);
1125 }
1126 
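/*
 * Handle a WQE-triggered page fault: locate the faulting QP, read the
 * offending WQE from user memory, fault in the pages referenced by its data
 * segments, and resume the QP with or without an error.
 */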
1127 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
1128 					  struct mlx5_pagefault *pfault)
1129 {
1130 	int ret;
1131 	void *wqe, *wqe_end;
1132 	u32 bytes_mapped, total_wqe_bytes;
1133 	char *buffer = NULL;
1134 	int resume_with_error = 1;
1135 	u16 wqe_index = pfault->wqe.wqe_index;
1136 	int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
1137 	struct mlx5_core_rsc_common *res;
1138 	struct mlx5_ib_qp *qp;
1139 
1140 	res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1141 	if (!res) {
1142 		mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1143 		return;
1144 	}
1145 
1146 	switch (res->res) {
1147 	case MLX5_RES_QP:
1148 		qp = res_to_qp(res);
1149 		break;
1150 	default:
1151 		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
1152 		goto resolve_page_fault;
1153 	}
1154 
1155 	buffer = (char *)__get_free_page(GFP_KERNEL);
1156 	if (!buffer) {
1157 		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
1158 		goto resolve_page_fault;
1159 	}
1160 
1161 	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
1162 				    PAGE_SIZE, &qp->trans_qp.base);
1163 	if (ret < 0) {
1164 		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
1165 			    ret, wqe_index, pfault->token);
1166 		goto resolve_page_fault;
1167 	}
1168 
1169 	wqe = buffer;
1170 	if (requestor)
1171 		ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
1172 							  &wqe_end, ret);
1173 	else
1174 		ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
1175 							  &wqe_end, ret);
1176 	if (ret < 0)
1177 		goto resolve_page_fault;
1178 
1179 	if (wqe >= wqe_end) {
1180 		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
1181 		goto resolve_page_fault;
1182 	}
1183 
1184 	ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
1185 				      &bytes_mapped, &total_wqe_bytes,
1186 				      !requestor);
1187 	if (ret == -EAGAIN) {
1188 		resume_with_error = 0;
1189 		goto resolve_page_fault;
1190 	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
1191 		goto resolve_page_fault;
1192 	}
1193 
1194 	resume_with_error = 0;
1195 resolve_page_fault:
1196 	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1197 	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
1198 		    pfault->wqe.wq_num, resume_with_error,
1199 		    pfault->type);
1200 	mlx5_core_res_put(res);
1201 	free_page((unsigned long)buffer);
1202 }
1203 
1204 static int pages_in_range(u64 address, u32 length)
1205 {
1206 	return (ALIGN(address + length, PAGE_SIZE) -
1207 		(address & PAGE_MASK)) >> PAGE_SHIFT;
1208 }
1209 
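/*
 * Handle an RDMA-responder page fault: map the pages needed by the current
 * packet, resume the QP, and then optionally prefetch additional pages.
 */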
1210 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
1211 					   struct mlx5_pagefault *pfault)
1212 {
1213 	u64 address;
1214 	u32 length;
1215 	u32 prefetch_len = pfault->bytes_committed;
1216 	int prefetch_activated = 0;
1217 	u32 rkey = pfault->rdma.r_key;
1218 	int ret;
1219 
1220 	/* The RDMA responder handler handles the page fault in two parts.
1221 	 * First it brings the necessary pages for the current packet
1222 	 * (and uses the pfault context), and then (after resuming the QP)
1223 	 * prefetches more pages. The second operation cannot use the pfault
1224 	 * context and therefore uses a separate bytes_committed counter
1225 	 * allocated on the stack. */
1226 	pfault->rdma.rdma_va += pfault->bytes_committed;
1227 	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1228 					 pfault->rdma.rdma_op_len);
1229 	pfault->bytes_committed = 0;
1230 
1231 	address = pfault->rdma.rdma_va;
1232 	length  = pfault->rdma.rdma_op_len;
1233 
1234 	/* For some operations, the hardware cannot tell the exact message
1235 	 * length, and in those cases it reports zero. Fall back to the
1236 	 * prefetch logic, using the received packet size instead. */
1237 	if (length == 0) {
1238 		prefetch_activated = 1;
1239 		length = pfault->rdma.packet_size;
1240 		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
1241 	}
1242 
1243 	ret = pagefault_single_data_segment(dev, rkey, address, length,
1244 					    &pfault->bytes_committed, NULL,
1245 					    0);
1246 	if (ret == -EAGAIN) {
1247 		/* We're racing with an invalidation, don't prefetch */
1248 		prefetch_activated = 0;
1249 	} else if (ret < 0 || pages_in_range(address, length) > ret) {
1250 		mlx5_ib_page_fault_resume(dev, pfault, 1);
1251 		if (ret != -ENOENT)
1252 			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
1253 				    ret, pfault->token, pfault->type);
1254 		return;
1255 	}
1256 
1257 	mlx5_ib_page_fault_resume(dev, pfault, 0);
1258 	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
1259 		    pfault->token, pfault->type,
1260 		    prefetch_activated);
1261 
1262 	/* At this point, there might be a new pagefault already arriving in
1263 	 * the eq, switch to the dummy pagefault for the rest of the
1264 	 * processing. We're still OK with the objects being alive as the
1265 	 * work-queue is being fenced. */
1266 
1267 	if (prefetch_activated) {
1268 		u32 bytes_committed = 0;
1269 
1270 		ret = pagefault_single_data_segment(dev, rkey, address,
1271 						    prefetch_len,
1272 						    &bytes_committed, NULL,
1273 						    0);
1274 		if (ret < 0 && ret != -EAGAIN) {
1275 			mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
1276 				    ret, pfault->token, address, prefetch_len);
1277 		}
1278 	}
1279 }
1280 
1281 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1282 {
1283 	u8 event_subtype = pfault->event_subtype;
1284 
1285 	switch (event_subtype) {
1286 	case MLX5_PFAULT_SUBTYPE_WQE:
1287 		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1288 		break;
1289 	case MLX5_PFAULT_SUBTYPE_RDMA:
1290 		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1291 		break;
1292 	default:
1293 		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
1294 			    event_subtype);
1295 		mlx5_ib_page_fault_resume(dev, pfault, 1);
1296 	}
1297 }
1298 
1299 static void mlx5_ib_eqe_pf_action(struct work_struct *work)
1300 {
1301 	struct mlx5_pagefault *pfault = container_of(work,
1302 						     struct mlx5_pagefault,
1303 						     work);
1304 	struct mlx5_ib_pf_eq *eq = pfault->eq;
1305 
1306 	mlx5_ib_pfault(eq->dev, pfault);
1307 	mempool_free(pfault, eq->pool);
1308 }
1309 
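/*
 * Drain the page-fault EQ: decode each EQE into a struct mlx5_pagefault and
 * queue it for handling on the page-fault workqueue.  If the mempool is
 * exhausted, defer further processing to the EQ work item.
 */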
1310 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
1311 {
1312 	struct mlx5_eqe_page_fault *pf_eqe;
1313 	struct mlx5_pagefault *pfault;
1314 	struct mlx5_eqe *eqe;
1315 	int cc = 0;
1316 
1317 	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
1318 		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
1319 		if (!pfault) {
1320 			schedule_work(&eq->work);
1321 			break;
1322 		}
1323 
1324 		pf_eqe = &eqe->data.page_fault;
1325 		pfault->event_subtype = eqe->sub_type;
1326 		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
1327 
1328 		mlx5_ib_dbg(eq->dev,
1329 			    "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
1330 			    eqe->sub_type, pfault->bytes_committed);
1331 
1332 		switch (eqe->sub_type) {
1333 		case MLX5_PFAULT_SUBTYPE_RDMA:
1334 			/* RDMA based event */
1335 			pfault->type =
1336 				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
1337 			pfault->token =
1338 				be32_to_cpu(pf_eqe->rdma.pftype_token) &
1339 				MLX5_24BIT_MASK;
1340 			pfault->rdma.r_key =
1341 				be32_to_cpu(pf_eqe->rdma.r_key);
1342 			pfault->rdma.packet_size =
1343 				be16_to_cpu(pf_eqe->rdma.packet_length);
1344 			pfault->rdma.rdma_op_len =
1345 				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
1346 			pfault->rdma.rdma_va =
1347 				be64_to_cpu(pf_eqe->rdma.rdma_va);
1348 			mlx5_ib_dbg(eq->dev,
1349 				    "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
1350 				    pfault->type, pfault->token,
1351 				    pfault->rdma.r_key);
1352 			mlx5_ib_dbg(eq->dev,
1353 				    "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
1354 				    pfault->rdma.rdma_op_len,
1355 				    pfault->rdma.rdma_va);
1356 			break;
1357 
1358 		case MLX5_PFAULT_SUBTYPE_WQE:
1359 			/* WQE based event */
1360 			pfault->type =
1361 				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1362 			pfault->token =
1363 				be32_to_cpu(pf_eqe->wqe.token);
1364 			pfault->wqe.wq_num =
1365 				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
1366 				MLX5_24BIT_MASK;
1367 			pfault->wqe.wqe_index =
1368 				be16_to_cpu(pf_eqe->wqe.wqe_index);
1369 			pfault->wqe.packet_size =
1370 				be16_to_cpu(pf_eqe->wqe.packet_length);
1371 			mlx5_ib_dbg(eq->dev,
1372 				    "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
1373 				    pfault->type, pfault->token,
1374 				    pfault->wqe.wq_num,
1375 				    pfault->wqe.wqe_index);
1376 			break;
1377 
1378 		default:
1379 			mlx5_ib_warn(eq->dev,
1380 				     "Unsupported page fault event sub-type: 0x%02hhx\n",
1381 				     eqe->sub_type);
1382 			/* Unsupported page faults should still be
1383 			 * resolved by the page fault handler
1384 			 */
1385 		}
1386 
1387 		pfault->eq = eq;
1388 		INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
1389 		queue_work(eq->wq, &pfault->work);
1390 
1391 		cc = mlx5_eq_update_cc(eq->core, ++cc);
1392 	}
1393 
1394 	mlx5_eq_update_ci(eq->core, cc, 1);
1395 }
1396 
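/*
 * Page-fault EQ interrupt handler: process the EQ directly when the lock is
 * uncontended, otherwise defer to the EQ work item.
 */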
1397 static irqreturn_t mlx5_ib_eq_pf_int(int irq, void *eq_ptr)
1398 {
1399 	struct mlx5_ib_pf_eq *eq = eq_ptr;
1400 	unsigned long flags;
1401 
1402 	if (spin_trylock_irqsave(&eq->lock, flags)) {
1403 		mlx5_ib_eq_pf_process(eq);
1404 		spin_unlock_irqrestore(&eq->lock, flags);
1405 	} else {
1406 		schedule_work(&eq->work);
1407 	}
1408 
1409 	return IRQ_HANDLED;
1410 }
1411 
1412 /* mempool_refill() was proposed but unfortunately wasn't accepted
1413  * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
1414  * Cheap workaround.
1415  */
1416 static void mempool_refill(mempool_t *pool)
1417 {
1418 	while (pool->curr_nr < pool->min_nr)
1419 		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
1420 }
1421 
1422 static void mlx5_ib_eq_pf_action(struct work_struct *work)
1423 {
1424 	struct mlx5_ib_pf_eq *eq =
1425 		container_of(work, struct mlx5_ib_pf_eq, work);
1426 
1427 	mempool_refill(eq->pool);
1428 
1429 	spin_lock_irq(&eq->lock);
1430 	mlx5_ib_eq_pf_process(eq);
1431 	spin_unlock_irq(&eq->lock);
1432 }
1433 
1434 enum {
1435 	MLX5_IB_NUM_PF_EQE	= 0x1000,
1436 	MLX5_IB_NUM_PF_DRAIN	= 64,
1437 };
1438 
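/*
 * Create the dedicated page-fault EQ along with its pagefault mempool and
 * high-priority workqueue.
 */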
1439 static int
1440 mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1441 {
1442 	struct mlx5_eq_param param = {};
1443 	int err;
1444 
1445 	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
1446 	spin_lock_init(&eq->lock);
1447 	eq->dev = dev;
1448 
1449 	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
1450 					       sizeof(struct mlx5_pagefault));
1451 	if (!eq->pool)
1452 		return -ENOMEM;
1453 
1454 	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
1455 				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
1456 				 MLX5_NUM_CMD_EQE);
1457 	if (!eq->wq) {
1458 		err = -ENOMEM;
1459 		goto err_mempool;
1460 	}
1461 
1462 	param = (struct mlx5_eq_param) {
1463 		.index = MLX5_EQ_PFAULT_IDX,
1464 		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
1465 		.nent = MLX5_IB_NUM_PF_EQE,
1466 		.context = eq,
1467 		.handler = mlx5_ib_eq_pf_int
1468 	};
1469 	eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", &param);
1470 	if (IS_ERR(eq->core)) {
1471 		err = PTR_ERR(eq->core);
1472 		goto err_wq;
1473 	}
1474 
1475 	return 0;
1476 err_wq:
1477 	destroy_workqueue(eq->wq);
1478 err_mempool:
1479 	mempool_destroy(eq->pool);
1480 	return err;
1481 }
1482 
1483 static int
1484 mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1485 {
1486 	int err;
1487 
1488 	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
1489 	cancel_work_sync(&eq->work);
1490 	destroy_workqueue(eq->wq);
1491 	mempool_destroy(eq->pool);
1492 
1493 	return err;
1494 }
1495 
1496 void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
1497 {
1498 	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1499 		return;
1500 
1501 	switch (ent->order - 2) {
1502 	case MLX5_IMR_MTT_CACHE_ENTRY:
1503 		ent->page = PAGE_SHIFT;
1504 		ent->xlt = MLX5_IMR_MTT_ENTRIES *
1505 			   sizeof(struct mlx5_mtt) /
1506 			   MLX5_IB_UMR_OCTOWORD;
1507 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1508 		ent->limit = 0;
1509 		break;
1510 
1511 	case MLX5_IMR_KSM_CACHE_ENTRY:
1512 		ent->page = MLX5_KSM_PAGE_SHIFT;
1513 		ent->xlt = mlx5_imr_ksm_entries *
1514 			   sizeof(struct mlx5_klm) /
1515 			   MLX5_IB_UMR_OCTOWORD;
1516 		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
1517 		ent->limit = 0;
1518 		break;
1519 	}
1520 }
1521 
1522 static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
1523 	.advise_mr = mlx5_ib_advise_mr,
1524 };
1525 
1526 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1527 {
1528 	int ret = 0;
1529 
1530 	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1531 		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1532 
1533 	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1534 		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
1535 		if (ret) {
1536 			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
1537 			return ret;
1538 		}
1539 	}
1540 
1541 	if (!MLX5_CAP_GEN(dev->mdev, pg))
1542 		return ret;
1543 
1544 	ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1545 
1546 	return ret;
1547 }
1548 
1549 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1550 {
1551 	if (!MLX5_CAP_GEN(dev->mdev, pg))
1552 		return;
1553 
1554 	mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
1555 }
1556 
1557 int mlx5_ib_odp_init(void)
1558 {
1559 	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
1560 				       MLX5_IMR_MTT_BITS);
1561 
1562 	return 0;
1563 }
1564 
1565 struct prefetch_mr_work {
1566 	struct work_struct work;
1567 	struct mlx5_ib_dev *dev;
1568 	u32 pf_flags;
1569 	u32 num_sge;
1570 	struct ib_sge sg_list[0];
1571 };
1572 
1573 static int mlx5_ib_prefetch_sg_list(struct mlx5_ib_dev *dev, u32 pf_flags,
1574 				    struct ib_sge *sg_list, u32 num_sge)
1575 {
1576 	int i;
1577 
1578 	for (i = 0; i < num_sge; ++i) {
1579 		struct ib_sge *sg = &sg_list[i];
1580 		int bytes_committed = 0;
1581 		int ret;
1582 
1583 		ret = pagefault_single_data_segment(dev, sg->lkey, sg->addr,
1584 						    sg->length,
1585 						    &bytes_committed, NULL,
1586 						    pf_flags);
1587 		if (ret < 0)
1588 			return ret;
1589 	}
1590 	return 0;
1591 }
1592 
1593 static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
1594 {
1595 	struct prefetch_mr_work *w =
1596 		container_of(work, struct prefetch_mr_work, work);
1597 
1598 	if (ib_device_try_get(&w->dev->ib_dev)) {
1599 		mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
1600 					 w->num_sge);
1601 		ib_device_put(&w->dev->ib_dev);
1602 	}
1603 	put_device(&w->dev->ib_dev.dev);
1604 	kfree(w);
1605 }
1606 
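/*
 * Implement the prefetch MR advice: synchronously when FLUSH is requested,
 * otherwise asynchronously from a work item that holds a device reference.
 */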
1607 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1608 			       enum ib_uverbs_advise_mr_advice advice,
1609 			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
1610 {
1611 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
1612 	u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
1613 	struct prefetch_mr_work *work;
1614 
1615 	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
1616 		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
1617 
1618 	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
1619 		return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
1620 						num_sge);
1621 
1622 	work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
1623 	if (!work)
1624 		return -ENOMEM;
1625 
1626 	memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
1627 
1628 	get_device(&dev->ib_dev.dev);
1629 	work->dev = dev;
1630 	work->pf_flags = pf_flags;
1631 	work->num_sge = num_sge;
1632 
1633 	INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
1634 	schedule_work(&work->work);
1635 	return 0;
1636 }
1637