xref: /openbmc/linux/drivers/infiniband/hw/mlx5/odp.c (revision 060f35a317ef09101b128f399dce7ed13d019461)
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

#include "mlx5_ib.h"
#include "cmd.h"
#include "umr.h"
#include "qp.h"

#include <linux/mlx5/eq.h>

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u32			token;
	u8			event_subtype;
	u8			type;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * Number of the resource holding the WQE; its
			 * meaning depends on the pagefault type.
			 */
			u32	wq_num;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size; the minimal page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};

	struct mlx5_ib_pf_eq	*eq;
	struct work_struct	work;
};


#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
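
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT = 12):
 * MLX5_IMR_MTT_BITS = 18, so each implicit-ODP child MR spans
 * MLX5_IMR_MTT_SIZE = 1 GiB, described by MLX5_IMR_MTT_ENTRIES = 256K
 * page-sized MTT entries, and the parent KSM advances in 1 GiB steps
 * (MLX5_KSM_PAGE_SHIFT = 30).
 */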

static u64 mlx5_imr_ksm_entries;

static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
			struct mlx5_ib_mr *imr, int flags)
{
	struct mlx5_klm *end = pklm + nentries;

	if (flags & MLX5_IB_UPD_XLT_ZAP) {
		for (; pklm != end; pklm++, idx++) {
			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
			pklm->va = 0;
		}
		return;
	}

	/*
	 * The locking here is pretty subtle. Ideally the implicit_children
	 * xarray would be protected by the umem_mutex, however that is not
	 * possible. Instead this uses a weaker update-then-lock pattern:
	 *
	 *    xa_store()
	 *    mutex_lock(umem_mutex)
	 *     mlx5r_umr_update_xlt()
	 *    mutex_unlock(umem_mutex)
	 *    destroy lkey
	 *
	 * i.e. any change to the xarray must be followed by the locked
	 * update_xlt before destroying.
	 *
	 * The umem_mutex provides the acquire/release semantic needed to make
	 * the xa_store() visible to a racing thread.
	 */
	lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);

	for (; pklm != end; pklm++, idx++) {
		struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);

		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
		if (mtt) {
			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
			pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
		} else {
			pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
			pklm->va = 0;
		}
	}
}

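/*
 * ODP keeps the per-page access rights in the low bits of each dma_list
 * entry; translate them into the MTT permission bits the HW expects.
 */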
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_WRITE;

	return mtt_entry;
}

static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
			 struct mlx5_ib_mr *mr, int flags)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	dma_addr_t pa;
	size_t i;

	if (flags & MLX5_IB_UPD_XLT_ZAP)
		return;

	for (i = 0; i < nentries; i++) {
		pa = odp->dma_list[idx + i];
		pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
	}
}

void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags)
{
	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		populate_klm(xlt, idx, nentries, mr, flags);
	} else {
		populate_mtt(xlt, idx, nentries, mr, flags);
	}
}

/*
 * This must be called after the mr has been removed from implicit_children.
 * NOTE: The MR does not necessarily have to be empty here; parallel page
 * faults could have raced with the free process and added pages to it.
 */
static void free_implicit_child_mr_work(struct work_struct *work)
{
	struct mlx5_ib_mr *mr =
		container_of(work, struct mlx5_ib_mr, odp_destroy.work);
	struct mlx5_ib_mr *imr = mr->parent;
	struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

	mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	mutex_lock(&odp_imr->umem_mutex);
	mlx5r_umr_update_xlt(mr->parent,
			     ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
			     MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
	mutex_unlock(&odp_imr->umem_mutex);
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);

	mlx5r_deref_odp_mkey(&imr->mmkey);
}

static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
	struct mlx5_ib_mr *imr = mr->parent;

	if (!refcount_inc_not_zero(&imr->mmkey.usecount))
		return;

	xa_erase(&imr->implicit_children, idx);

	/* Freeing an MR is a sleeping operation, so bounce to a work queue */
	INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
	queue_work(system_unbound_wq, &mr->odp_destroy.work);
}

static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{
	struct ib_umem_odp *umem_odp =
		container_of(mni, struct ib_umem_odp, notifier);
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
	u64 idx = 0, blk_start_idx = 0;
	u64 invalidations = 0;
	unsigned long start;
	unsigned long end;
	int in_block = 0;
	u64 addr;

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&umem_odp->umem_mutex);
	mmu_interval_set_seq(mni, cur_seq);
	/*
	 * If npages is zero then umem_odp->private may not be setup yet. This
	 * does not complete until after the first page is mapped for DMA.
	 */
	if (!umem_odp->npages)
		goto out;
	mr = umem_odp->private;

	start = max_t(u64, ib_umem_start(umem_odp), range->start);
	end = min_t(u64, ib_umem_end(umem_odp), range->end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs.  Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */
	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of a bigger
		 * UMR.
		 */
		if (umem_odp->dma_list[idx] &
		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
			if (!in_block) {
				blk_start_idx = idx;
				in_block = 1;
			}

			/* Count page invalidations */
			invalidations += idx - blk_start_idx + 1;
		} else {
			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5r_umr_update_xlt(mr, blk_start_idx,
						     idx - blk_start_idx, 0,
						     MLX5_IB_UPD_XLT_ZAP |
						     MLX5_IB_UPD_XLT_ATOMIC);
				in_block = 0;
			}
		}
	}
	if (in_block)
		mlx5r_umr_update_xlt(mr, blk_start_idx,
				     idx - blk_start_idx + 1, 0,
				     MLX5_IB_UPD_XLT_ZAP |
				     MLX5_IB_UPD_XLT_ATOMIC);

	mlx5_update_odp_stats(mr, invalidations, invalidations);

	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */

	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

	if (unlikely(!umem_odp->npages && mr->parent))
		destroy_unused_implicit_child_mr(mr);
out:
	mutex_unlock(&umem_odp->umem_mutex);
	return true;
}

const struct mmu_interval_notifier_ops mlx5_mn_ops = {
	.invalidate = mlx5_ib_invalidate_range,
};

static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
		return;

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		dev->odp_max_size = U64_MAX;
	else
		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
	int err;

	MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
	MLX5_SET(page_fault_resume_in, in, token, pfault->token);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, error, !!error);

	err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
	if (err)
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
			    wq_num, err);
}

static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
						unsigned long idx)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(imr);
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	struct mlx5_ib_mr *ret;
	int err;

	odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
				      idx * MLX5_IMR_MTT_SIZE,
				      MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
	if (IS_ERR(odp))
		return ERR_CAST(odp);

	mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
				 MLX5_MKC_ACCESS_MODE_MTT,
				 MLX5_IMR_MTT_ENTRIES);
	if (IS_ERR(mr)) {
		ib_umem_odp_release(odp);
		return mr;
	}

	mr->access_flags = imr->access_flags;
	mr->ibmr.pd = imr->ibmr.pd;
	mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
	mr->umem = &odp->umem;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
	mr->parent = imr;
	odp->private = mr;

	/*
	 * The first refcount is owned by the xarray and the second refcount
	 * is returned to the caller.
	 */
	refcount_set(&mr->mmkey.usecount, 2);

	err = mlx5r_umr_update_xlt(mr, 0,
				   MLX5_IMR_MTT_ENTRIES,
				   PAGE_SHIFT,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ENABLE);
	if (err) {
		ret = ERR_PTR(err);
		goto out_mr;
	}

	xa_lock(&imr->implicit_children);
	ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
			   GFP_KERNEL);
	if (unlikely(ret)) {
		if (xa_is_err(ret)) {
			ret = ERR_PTR(xa_err(ret));
			goto out_lock;
		}
		/*
		 * Another thread beat us to creating the child mr, use
		 * theirs.
		 */
		refcount_inc(&ret->mmkey.usecount);
		goto out_lock;
	}
	xa_unlock(&imr->implicit_children);

	mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
	return mr;

out_lock:
	xa_unlock(&imr->implicit_children);
out_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ret;
}

struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
	struct ib_umem_odp *umem_odp;
	struct mlx5_ib_mr *imr;
	int err;

	if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
		return ERR_PTR(-EOPNOTSUPP);

	umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
	if (IS_ERR(umem_odp))
		return ERR_CAST(umem_odp);

	imr = mlx5_mr_cache_alloc(dev, access_flags, MLX5_MKC_ACCESS_MODE_KSM,
				  mlx5_imr_ksm_entries);
	if (IS_ERR(imr)) {
		ib_umem_odp_release(umem_odp);
		return imr;
	}

	imr->access_flags = access_flags;
	imr->ibmr.pd = &pd->ibpd;
	imr->ibmr.iova = 0;
	imr->umem = &umem_odp->umem;
	imr->ibmr.lkey = imr->mmkey.key;
	imr->ibmr.rkey = imr->mmkey.key;
	imr->ibmr.device = &dev->ib_dev;
	imr->is_odp_implicit = true;
	xa_init(&imr->implicit_children);

	err = mlx5r_umr_update_xlt(imr, 0,
				   mlx5_imr_ksm_entries,
				   MLX5_KSM_PAGE_SHIFT,
				   MLX5_IB_UPD_XLT_INDIRECT |
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ENABLE);
	if (err)
		goto out_mr;

	err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
	if (err)
		goto out_mr;

	mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
	return imr;
out_mr:
	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
	mlx5_ib_dereg_mr(&imr->ibmr, NULL);
	return ERR_PTR(err);
}

void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_mr *mtt;
	unsigned long idx;

	/*
	 * If this is an implicit MR it is already invalidated so we can just
	 * delete the child mkeys.
	 */
	xa_for_each(&mr->implicit_children, idx, mtt) {
		xa_erase(&mr->implicit_children, idx);
		mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
	}
}

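/*
 * MLX5_PF_FLAGS_DOWNGRADE maps the pages read-only even when the umem is
 * writable, MLX5_PF_FLAGS_SNAPSHOT only mirrors pages that are already
 * present instead of faulting anything in, and MLX5_PF_FLAGS_ENABLE marks
 * the first population of a fresh mkey (it adds MLX5_IB_UPD_XLT_ENABLE).
 */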
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
#define MLX5_PF_FLAGS_ENABLE BIT(3)
static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
			     u32 flags)
{
	int page_shift, ret, np;
	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
	u64 access_mask;
	u64 start_idx;
	bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
	u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;

	if (flags & MLX5_PF_FLAGS_ENABLE)
		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;

	page_shift = odp->page_shift;
	start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
	access_mask = ODP_READ_ALLOWED_BIT;

	if (odp->umem.writable && !downgrade)
		access_mask |= ODP_WRITE_ALLOWED_BIT;

	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
	if (np < 0)
		return np;

	/*
	 * No need to check whether the MTTs really belong to this MR, since
	 * ib_umem_odp_map_dma_and_lock already checks this.
	 */
	ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
	mutex_unlock(&odp->umem_mutex);

	if (ret < 0) {
		if (ret != -EAGAIN)
			mlx5_ib_err(mr_to_mdev(mr),
				    "Failed to update mkey page tables\n");
		goto out;
	}

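	/*
	 * np counts whole pages starting at the page that contains user_va,
	 * so subtract user_va's offset within its first page to get the
	 * bytes mapped from user_va onward. E.g. with page_shift = 12,
	 * user_va = 0x1300 and bcnt = 0x2000, three 4 KiB pages are faulted
	 * and new_mappings = 3 * 4096 - 0x300 = 11520, clamped to bcnt.
	 */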
	if (bytes_mapped) {
		u32 new_mappings = (np << page_shift) -
			(user_va - round_down(user_va, 1 << page_shift));

		*bytes_mapped += min_t(u32, new_mappings, bcnt);
	}

	return np << (page_shift - PAGE_SHIFT);

out:
	return ret;
}

static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
				 struct ib_umem_odp *odp_imr, u64 user_va,
				 size_t bcnt, u32 *bytes_mapped, u32 flags)
{
	unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
	unsigned long upd_start_idx = end_idx + 1;
	unsigned long upd_len = 0;
	unsigned long npages = 0;
	int err;
	int ret;

	if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
		     mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
		return -EFAULT;

	/* Fault each child mr that intersects with our interval. */
	while (bcnt) {
		unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
		struct ib_umem_odp *umem_odp;
		struct mlx5_ib_mr *mtt;
		u64 len;

		xa_lock(&imr->implicit_children);
		mtt = xa_load(&imr->implicit_children, idx);
		if (unlikely(!mtt)) {
			xa_unlock(&imr->implicit_children);
			mtt = implicit_get_child_mr(imr, idx);
			if (IS_ERR(mtt)) {
				ret = PTR_ERR(mtt);
				goto out;
			}
			upd_start_idx = min(upd_start_idx, idx);
			upd_len = idx - upd_start_idx + 1;
		} else {
			refcount_inc(&mtt->mmkey.usecount);
			xa_unlock(&imr->implicit_children);
		}

		umem_odp = to_ib_umem_odp(mtt->umem);
		len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
		      user_va;

		ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
					bytes_mapped, flags);

		mlx5r_deref_odp_mkey(&mtt->mmkey);

		if (ret < 0)
			goto out;
		user_va += len;
		bcnt -= len;
		npages += ret;
	}

	ret = npages;

	/*
	 * Any time the implicit_children are changed we must perform an
	 * update of the xlt before exiting to ensure the HW and the
	 * implicit_children remain synchronized.
	 */
out:
	if (likely(!upd_len))
		return ret;

	/*
	 * Notice this is not strictly ordered right, the KSM is updated after
	 * the implicit_children is updated, so a parallel page fault could
	 * see an MR that is not yet visible in the KSM.  This is similar to a
	 * parallel page fault seeing an MR that is being concurrently removed
	 * from the KSM. Both of these improbable situations are resolved
	 * safely by resuming the HW and then taking another page fault. The
	 * next pagefault handler will see the new information.
	 */
	mutex_lock(&odp_imr->umem_mutex);
	err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0,
				   MLX5_IB_UPD_XLT_INDIRECT |
					  MLX5_IB_UPD_XLT_ATOMIC);
	mutex_unlock(&odp_imr->umem_mutex);
	if (err) {
		mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
		return err;
	}
	return ret;
}

static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
			       u32 *bytes_mapped, u32 flags)
{
	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
	u32 xlt_flags = 0;
	int err;
	unsigned int page_size;

	if (flags & MLX5_PF_FLAGS_ENABLE)
		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;

	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err) {
		dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
		return err;
	}

	page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf);
	if (!page_size) {
		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
		err = -EINVAL;
	} else {
		err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
	}
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	if (err)
		return err;

	if (bytes_mapped)
		*bytes_mapped += bcnt;

	return ib_umem_num_pages(mr->umem);
}

/*
 * Returns:
 *  -EFAULT: The range [io_virt, io_virt + bcnt) is not within the MR, it
 *           covers pages that are not accessible, or the MR is no longer
 *           valid.
 *  -EAGAIN/-ENOMEM: The operation should be retried
 *
 *  -EINVAL/others: General internal malfunction
 *  >0: Number of pages mapped
 */
static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
			u32 *bytes_mapped, u32 flags, bool permissive_fault)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

	if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault)
		return -EFAULT;

	if (mr->umem->is_dmabuf)
		return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);

	if (!odp->is_implicit_odp) {
		u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova;
		u64 user_va;

		if (check_add_overflow(offset, (u64)odp->umem.address,
				       &user_va))
			return -EFAULT;

		if (permissive_fault) {
			if (user_va < ib_umem_start(odp))
				user_va = ib_umem_start(odp);
			if ((user_va + bcnt) > ib_umem_end(odp))
				bcnt = ib_umem_end(odp) - user_va;
		} else if (unlikely(user_va >= ib_umem_end(odp) ||
				    ib_umem_end(odp) - user_va < bcnt))
			return -EFAULT;
		return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
					 flags);
	}
	return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
				     flags);
}

int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	int ret;

	ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
				mr->umem->length, NULL,
				MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
	return ret >= 0 ? 0 : ret;
}

int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	int ret;

	ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
				  MLX5_PF_FLAGS_ENABLE);

	return ret >= 0 ? 0 : ret;
}

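/*
 * Indirect mkeys (MWs and DEVX indirect mkeys) are resolved iteratively
 * rather than recursively: each KLM entry that still has outstanding
 * bytes is pushed as a pf_frame and later popped by
 * pagefault_single_data_segment(), with the depth bounded by the
 * device's max_indirection capability.
 */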
struct pf_frame {
	struct pf_frame *next;
	u32 key;
	u64 io_virt;
	size_t bcnt;
	int depth;
};

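/*
 * The low byte of an mkey is a variant/tag that may change when a MW is
 * rebound or a DEVX indirect mkey is re-registered, so for those types
 * only the 24-bit base index is compared.
 */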
static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
	if (!mmkey)
		return false;
	if (mmkey->type == MLX5_MKEY_MW ||
	    mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
		return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
	return mmkey->key == key;
}

/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns zero on success. The caller may continue to the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
					 struct ib_pd *pd, u32 key,
					 u64 io_virt, size_t bcnt,
					 u32 *bytes_committed,
					 u32 *bytes_mapped)
{
	int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
	struct pf_frame *head = NULL, *frame;
	struct mlx5_ib_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	struct mlx5_klm *pklm;
	u32 *out = NULL;
	size_t offset;

	io_virt += *bytes_committed;
	bcnt -= *bytes_committed;

next_mr:
	xa_lock(&dev->odp_mkeys);
	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
	if (!mmkey) {
		xa_unlock(&dev->odp_mkeys);
		mlx5_ib_dbg(
			dev,
			"skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
			key);
		if (bytes_mapped)
			*bytes_mapped += bcnt;
		/*
		 * The user could specify an SGL with multiple lkeys and only
		 * some of them are ODP. Treat the non-ODP ones as fully
		 * faulted.
		 */
		ret = 0;
		goto end;
	}
	refcount_inc(&mmkey->usecount);
	xa_unlock(&dev->odp_mkeys);

	if (!mkey_is_eq(mmkey, key)) {
		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
		ret = -EFAULT;
		goto end;
	}

	switch (mmkey->type) {
	case MLX5_MKEY_MR:
		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

		pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
				  (io_virt & PAGE_MASK)) >>
				 PAGE_SHIFT;
		ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
		if (ret < 0)
			goto end;

		mlx5_update_odp_stats(mr, faults, ret);

		if (ret < pages_in_range) {
			ret = -EFAULT;
			goto end;
		}

		ret = 0;
		break;

	case MLX5_MKEY_MW:
	case MLX5_MKEY_INDIRECT_DEVX:
		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
			mlx5_ib_dbg(dev, "indirection level exceeded\n");
			ret = -EFAULT;
			goto end;
		}

		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
			sizeof(*pklm) * (mmkey->ndescs - 2);

		if (outlen > cur_outlen) {
			kfree(out);
			out = kzalloc(outlen, GFP_KERNEL);
			if (!out) {
				ret = -ENOMEM;
				goto end;
			}
			cur_outlen = outlen;
		}

		pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
						       bsf0_klm0_pas_mtt0_1);

		ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
		if (ret)
			goto end;

		offset = io_virt - MLX5_GET64(query_mkey_out, out,
					      memory_key_mkey_entry.start_addr);

		for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
			if (offset >= be32_to_cpu(pklm->bcount)) {
				offset -= be32_to_cpu(pklm->bcount);
				continue;
			}

			frame = kzalloc(sizeof(*frame), GFP_KERNEL);
			if (!frame) {
				ret = -ENOMEM;
				goto end;
			}

			frame->key = be32_to_cpu(pklm->key);
			frame->io_virt = be64_to_cpu(pklm->va) + offset;
			frame->bcnt = min_t(size_t, bcnt,
					    be32_to_cpu(pklm->bcount) - offset);
			frame->depth = depth + 1;
			frame->next = head;
			head = frame;

			bcnt -= frame->bcnt;
			offset = 0;
		}
		break;

	default:
		mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
		ret = -EFAULT;
		goto end;
	}

	if (head) {
		frame = head;
		head = frame->next;

		key = frame->key;
		io_virt = frame->io_virt;
		bcnt = frame->bcnt;
		depth = frame->depth;
		kfree(frame);

		mlx5r_deref_odp_mkey(mmkey);
		goto next_mr;
	}

end:
	if (mmkey)
		mlx5r_deref_odp_mkey(mmkey);
	while (head) {
		frame = head;
		head = frame->next;
		kfree(frame);
	}
	kfree(out);

	*bytes_committed = 0;
	return ret;
}

/*
 * Parse a series of data segments for page fault handling.
 *
 * @dev:  Pointer to mlx5 IB device
 * @pfault: contains page fault information.
 * @wqe: points at the first data segment in the WQE.
 * @wqe_end: points after the end of the WQE.
 * @bytes_mapped: receives the number of bytes that the function was able to
 *                map. This allows the caller to decide intelligently whether
 *                enough memory was mapped to resolve the page fault
 *                successfully (e.g. enough for the next MTU, or the entire
 *                WQE).
 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
 *                   the committed bytes).
 * @receive_queue: true when parsing a receive-queue WQE, whose scatter list
 *                 may be terminated by the special terminate_scatter_list
 *                 mkey.
 *
 * Returns zero for success or a negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
				   struct mlx5_pagefault *pfault,
				   void *wqe,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, bool receive_queue)
{
	int ret = 0;
	u64 io_virt;
	__be32 key;
	u32 byte_count;
	size_t bcnt;
	int inline_segment;

	if (bytes_mapped)
		*bytes_mapped = 0;
	if (total_wqe_bytes)
		*total_wqe_bytes = 0;

	while (wqe < wqe_end) {
		struct mlx5_wqe_data_seg *dseg = wqe;

		io_virt = be64_to_cpu(dseg->addr);
		key = dseg->lkey;
		byte_count = be32_to_cpu(dseg->byte_count);
		inline_segment = !!(byte_count & MLX5_INLINE_SEG);
		bcnt	       = byte_count & ~MLX5_INLINE_SEG;

		if (inline_segment) {
			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
				     16);
		} else {
			wqe += sizeof(*dseg);
		}

		/* receive WQE end of sg list. */
		if (receive_queue && bcnt == 0 &&
		    key == dev->mkeys.terminate_scatter_list_mkey &&
		    io_virt == 0)
			break;

		if (!inline_segment && total_wqe_bytes) {
			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
					pfault->bytes_committed);
		}

		/* A zero length data segment designates a length of 2GB. */
		if (bcnt == 0)
			bcnt = 1U << 31;

		if (inline_segment || bcnt <= pfault->bytes_committed) {
			pfault->bytes_committed -=
				min_t(size_t, bcnt,
				      pfault->bytes_committed);
			continue;
		}

		ret = pagefault_single_data_segment(dev, NULL, be32_to_cpu(key),
						    io_virt, bcnt,
						    &pfault->bytes_committed,
						    bytes_mapped);
		if (ret < 0)
			break;
	}

	return ret;
}

/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
	u16 wqe_index = pfault->wqe.wqe_index;
	struct mlx5_base_av *av;
	unsigned ds, opcode;
	u32 qpn = qp->trans_qp.base.mqp.qpn;

	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
			    ds, wqe_length);
		return -EFAULT;
	}

	if (ds == 0) {
		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
			    wqe_index, qpn);
		return -EFAULT;
	}

	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
	*wqe += sizeof(*ctrl);

	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
		 MLX5_WQE_CTRL_OPCODE_MASK;

	if (qp->type == IB_QPT_XRC_INI)
		*wqe += sizeof(struct mlx5_wqe_xrc_seg);

	if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
		av = *wqe;
		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
			*wqe += sizeof(struct mlx5_av);
		else
			*wqe += sizeof(struct mlx5_base_av);
	}

	switch (opcode) {
	case MLX5_OPCODE_RDMA_WRITE:
	case MLX5_OPCODE_RDMA_WRITE_IMM:
	case MLX5_OPCODE_RDMA_READ:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
	case MLX5_OPCODE_ATOMIC_FA:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		*wqe += sizeof(struct mlx5_wqe_atomic_seg);
		break;
	}

	return 0;
}

/*
 * Parse responder WQE and set wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
						   struct mlx5_ib_srq *srq,
						   void **wqe, void **wqe_end,
						   int wqe_length)
{
	int wqe_size = 1 << srq->msrq.wqe_shift;

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
		return -EFAULT;
	}

	*wqe_end = *wqe + wqe_size;
	*wqe += sizeof(struct mlx5_wqe_srq_next_seg);

	return 0;
}

static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
						  struct mlx5_ib_qp *qp,
						  void *wqe, void **wqe_end,
						  int wqe_length)
{
	struct mlx5_ib_wq *wq = &qp->rq;
	int wqe_size = 1 << wq->wqe_shift;

	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
		return -EFAULT;
	}

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
		return -EFAULT;
	}

	*wqe_end = wqe + wqe_size;

	return 0;
}

static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
						       u32 wq_num, int pf_type)
{
	struct mlx5_core_rsc_common *common = NULL;
	struct mlx5_core_srq *srq;

	switch (pf_type) {
	case MLX5_WQE_PF_TYPE_RMP:
		srq = mlx5_cmd_get_srq(dev, wq_num);
		if (srq)
			common = &srq->common;
		break;
	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
	case MLX5_WQE_PF_TYPE_RESP:
	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
		common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
		break;
	default:
		break;
	}

	return common;
}

static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
{
	struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;

	return to_mibqp(mqp);
}

static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
{
	struct mlx5_core_srq *msrq =
		container_of(res, struct mlx5_core_srq, common);

	return to_mibsrq(msrq);
}

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
{
	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
	u16 wqe_index = pfault->wqe.wqe_index;
	void *wqe, *wqe_start = NULL, *wqe_end = NULL;
	u32 bytes_mapped, total_wqe_bytes;
	struct mlx5_core_rsc_common *res;
	int resume_with_error = 1;
	struct mlx5_ib_qp *qp;
	size_t bytes_copied;
	int ret = 0;

	res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
	if (!res) {
		mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
		return;
	}

	if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
	    res->res != MLX5_RES_XSRQ) {
		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
			    pfault->type);
		goto resolve_page_fault;
	}

	wqe_start = (void *)__get_free_page(GFP_KERNEL);
	if (!wqe_start) {
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		goto resolve_page_fault;
	}

	wqe = wqe_start;
	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
	if (qp && sq) {
		ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
					  &bytes_copied);
		if (ret)
			goto read_user;
		ret = mlx5_ib_mr_initiator_pfault_handler(
			dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
	} else if (qp && !sq) {
		ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
					  &bytes_copied);
		if (ret)
			goto read_user;
		ret = mlx5_ib_mr_responder_pfault_handler_rq(
			dev, qp, wqe, &wqe_end, bytes_copied);
	} else if (!qp) {
		struct mlx5_ib_srq *srq = res_to_srq(res);

		ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
					   &bytes_copied);
		if (ret)
			goto read_user;
		ret = mlx5_ib_mr_responder_pfault_handler_srq(
			dev, srq, &wqe, &wqe_end, bytes_copied);
	}

	if (ret < 0 || wqe >= wqe_end)
		goto resolve_page_fault;

	ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
				      &total_wqe_bytes, !sq);
	if (ret == -EAGAIN)
		goto out;

	if (ret < 0 || total_wqe_bytes > bytes_mapped)
		goto resolve_page_fault;

out:
	ret = 0;
	resume_with_error = 0;

read_user:
	if (ret)
		mlx5_ib_err(
			dev,
			"Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
			ret, wqe_index, pfault->token);

resolve_page_fault:
	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
		    pfault->wqe.wq_num, resume_with_error,
		    pfault->type);
	mlx5_core_res_put(res);
	free_page((unsigned long)wqe_start);
}

static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
					   struct mlx5_pagefault *pfault)
{
	u64 address;
	u32 length;
	u32 prefetch_len = pfault->bytes_committed;
	int prefetch_activated = 0;
	u32 rkey = pfault->rdma.r_key;
	int ret;

	/* The RDMA responder handler handles the page fault in two parts.
	 * First it brings the necessary pages for the current packet
	 * (and uses the pfault context), and then (after resuming the QP)
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack */
	pfault->rdma.rdma_va += pfault->bytes_committed;
	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
					 pfault->rdma.rdma_op_len);
	pfault->bytes_committed = 0;

	address = pfault->rdma.rdma_va;
	length  = pfault->rdma.rdma_op_len;

	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic. */
	if (length == 0) {
		prefetch_activated = 1;
		length = pfault->rdma.packet_size;
		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
	}

	ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
					    &pfault->bytes_committed, NULL);
	if (ret == -EAGAIN) {
		/* We're racing with an invalidation, don't prefetch */
		prefetch_activated = 0;
	} else if (ret < 0) {
		mlx5_ib_page_fault_resume(dev, pfault, 1);
		if (ret != -ENOENT)
			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
				    ret, pfault->token, pfault->type);
		return;
	}

	mlx5_ib_page_fault_resume(dev, pfault, 0);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
		    pfault->token, pfault->type,
		    prefetch_activated);

	/* At this point, there might be a new pagefault already arriving in
	 * the eq, switch to the dummy pagefault for the rest of the
	 * processing. We're still OK with the objects being alive as the
	 * work-queue is being fenced. */

	if (prefetch_activated) {
		u32 bytes_committed = 0;

		ret = pagefault_single_data_segment(dev, NULL, rkey, address,
						    prefetch_len,
						    &bytes_committed, NULL);
		if (ret < 0 && ret != -EAGAIN) {
			mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
				    ret, pfault->token, address, prefetch_len);
		}
	}
}

static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
{
	u8 event_subtype = pfault->event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
		break;
	default:
		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
			    event_subtype);
		mlx5_ib_page_fault_resume(dev, pfault, 1);
	}
}

static void mlx5_ib_eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_ib_pf_eq *eq = pfault->eq;

	mlx5_ib_pfault(eq->dev, pfault);
	mempool_free(pfault, eq->pool);
}

static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->work);
			break;
		}

		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_ib_dbg(eq->dev,
			    "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			    eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				MLX5_24BIT_MASK;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				    pfault->type, pfault->token,
				    pfault->rdma.r_key);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				    pfault->rdma.rdma_op_len,
				    pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				MLX5_24BIT_MASK;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				    pfault->type, pfault->token,
				    pfault->wqe.wq_num,
				    pfault->wqe.wqe_index);
			break;

		default:
			mlx5_ib_warn(eq->dev,
				     "Unsupported page fault event sub-type: 0x%02hhx\n",
				     eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
		queue_work(eq->wq, &pfault->work);

		cc = mlx5_eq_update_cc(eq->core, ++cc);
	}

	mlx5_eq_update_ci(eq->core, cc, 1);
}

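/*
 * EQ interrupt handler: process completions directly when the lock is
 * free; if another context already holds it, defer to the work queue
 * instead of spinning in hard-IRQ context.
 */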
static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
			     void *data)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->lock, flags)) {
		mlx5_ib_eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->lock, flags);
	} else {
		schedule_work(&eq->work);
	}

	return IRQ_HANDLED;
}

/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 * Cheap workaround.
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}

static void mlx5_ib_eq_pf_action(struct work_struct *work)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(work, struct mlx5_ib_pf_eq, work);

	mempool_refill(eq->pool);

	spin_lock_irq(&eq->lock);
	mlx5_ib_eq_pf_process(eq);
	spin_unlock_irq(&eq->lock);
}

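/*
 * The page-fault EQ holds up to MLX5_IB_NUM_PF_EQE entries; the mempool
 * created in mlx5r_odp_create_eq() keeps a reserve of MLX5_IB_NUM_PF_DRAIN
 * pagefault descriptors so EQEs can still be drained when a GFP_ATOMIC
 * allocation fails, with mlx5_ib_eq_pf_action() refilling the reserve from
 * process context.
 */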
enum {
	MLX5_IB_NUM_PF_EQE	= 0x1000,
	MLX5_IB_NUM_PF_DRAIN	= 64,
};

int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_eq_param param = {};
	int err = 0;

	mutex_lock(&dev->odp_eq_mutex);
	if (eq->core)
		goto unlock;
	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
	spin_lock_init(&eq->lock);
	eq->dev = dev;

	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
					       sizeof(struct mlx5_pagefault));
	if (!eq->pool) {
		err = -ENOMEM;
		goto unlock;
	}

	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
				 MLX5_NUM_CMD_EQE);
	if (!eq->wq) {
		err = -ENOMEM;
		goto err_mempool;
	}

	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
	param = (struct mlx5_eq_param) {
		.nent = MLX5_IB_NUM_PF_EQE,
	};
	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
	if (IS_ERR(eq->core)) {
		err = PTR_ERR(eq->core);
		goto err_wq;
	}
	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
	if (err) {
		mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
		goto err_eq;
	}

	mutex_unlock(&dev->odp_eq_mutex);
	return 0;
err_eq:
	mlx5_eq_destroy_generic(dev->mdev, eq->core);
err_wq:
	eq->core = NULL;
	destroy_workqueue(eq->wq);
err_mempool:
	mempool_destroy(eq->pool);
unlock:
	mutex_unlock(&dev->odp_eq_mutex);
	return err;
}

static int
mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
	int err;

	if (!eq->core)
		return 0;
	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
	cancel_work_sync(&eq->work);
	destroy_workqueue(eq->wq);
	mempool_destroy(eq->pool);

	return err;
}

int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
	struct mlx5r_cache_rb_key rb_key = {
		.access_mode = MLX5_MKC_ACCESS_MODE_KSM,
		.ndescs = mlx5_imr_ksm_entries,
	};
	struct mlx5_cache_ent *ent;

	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
		return 0;

	ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return 0;
}

1613 	.advise_mr = mlx5_ib_advise_mr,
1614 };
1615 
mlx5_ib_odp_init_one(struct mlx5_ib_dev * dev)1616 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1617 {
1618 	internal_fill_odp_caps(dev);
1619 
1620 	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1621 		return 0;
1622 
1623 	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1624 
1625 	mutex_init(&dev->odp_eq_mutex);
1626 	return 0;
1627 }
1628 
mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev * dev)1629 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1630 {
1631 	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1632 		return;
1633 
1634 	mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
1635 }
1636 
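/*
 * Size the implicit-MR KSM to cover the entire user address space; e.g.
 * on x86-64 with 4 KiB pages, get_order(TASK_SIZE) is 35, giving
 * 2^(35 - 18) = 128K KSM entries of 1 GiB each (128 TiB total).
 */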
int mlx5_ib_odp_init(void)
{
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
				       MLX5_IMR_MTT_BITS);

	return 0;
}

struct prefetch_mr_work {
	struct work_struct work;
	u32 pf_flags;
	u32 num_sge;
	struct {
		u64 io_virt;
		struct mlx5_ib_mr *mr;
		size_t length;
	} frags[];
};

static void destroy_prefetch_work(struct prefetch_mr_work *work)
{
	u32 i;

	for (i = 0; i < work->num_sge; ++i)
		mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);

	kvfree(work);
}

static struct mlx5_ib_mr *
get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		    u32 lkey)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_ib_mkey *mmkey;

	xa_lock(&dev->odp_mkeys);
	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
	if (!mmkey || mmkey->key != lkey) {
		mr = ERR_PTR(-ENOENT);
		goto end;
	}
	if (mmkey->type != MLX5_MKEY_MR) {
		mr = ERR_PTR(-EINVAL);
		goto end;
	}

	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

	if (mr->ibmr.pd != pd) {
		mr = ERR_PTR(-EPERM);
		goto end;
	}

	/* prefetch with write-access must be supported by the MR */
	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    !mr->umem->writable) {
		mr = ERR_PTR(-EPERM);
		goto end;
	}

	refcount_inc(&mmkey->usecount);
end:
	xa_unlock(&dev->odp_mkeys);
	return mr;
}

static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
{
	struct prefetch_mr_work *work =
		container_of(w, struct prefetch_mr_work, work);
	u32 bytes_mapped = 0;
	int ret;
	u32 i;

	/* We rely on IB/core to execute this work only when num_sge != 0. */
	WARN_ON(!work->num_sge);
	for (i = 0; i < work->num_sge; ++i) {
		ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
				   work->frags[i].length, &bytes_mapped,
				   work->pf_flags, false);
		if (ret <= 0)
			continue;
		mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
	}

	destroy_prefetch_work(work);
}

static int init_prefetch_work(struct ib_pd *pd,
			      enum ib_uverbs_advise_mr_advice advice,
			      u32 pf_flags, struct prefetch_mr_work *work,
			      struct ib_sge *sg_list, u32 num_sge)
{
	u32 i;

	INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
	work->pf_flags = pf_flags;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_ib_mr *mr;

		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
		if (IS_ERR(mr)) {
			work->num_sge = i;
			return PTR_ERR(mr);
		}
		work->frags[i].io_virt = sg_list[i].addr;
		work->frags[i].length = sg_list[i].length;
		work->frags[i].mr = mr;
	}
	work->num_sge = num_sge;
	return 0;
}

static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
				    enum ib_uverbs_advise_mr_advice advice,
				    u32 pf_flags, struct ib_sge *sg_list,
				    u32 num_sge)
{
	u32 bytes_mapped = 0;
	int ret = 0;
	u32 i;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_ib_mr *mr;

		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
		if (IS_ERR(mr))
			return PTR_ERR(mr);
		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
				   &bytes_mapped, pf_flags, false);
		if (ret < 0) {
			mlx5r_deref_odp_mkey(&mr->mmkey);
			return ret;
		}
		mlx5_update_odp_stats(mr, prefetch, ret);
		mlx5r_deref_odp_mkey(&mr->mmkey);
	}

	return 0;
}

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
	u32 pf_flags = 0;
	struct prefetch_mr_work *work;
	int rc;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;

	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
		return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
						num_sge);

	work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
	if (rc) {
		destroy_prefetch_work(work);
		return rc;
	}
	queue_work(system_unbound_wq, &work->work);
	return 0;
}