xref: /openbmc/linux/drivers/infiniband/sw/rdmavt/mr.c (revision 3e26a691)
/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"

/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	unsigned int lk_tab_size;
	int i;

	/*
	 * The top lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	if (!lkey_table_size)
		return -EINVAL;

	spin_lock_init(&rdi->lkey_table.lock);

	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	}
	rdi->lkey_table.max = 1 << lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
			       vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
		return -ENOMEM;

	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

	return 0;
}
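
/*
 * Illustrative sketch (not from the original source): a hypothetical
 * driver, say "xdev", would pick its lkey table size and NUMA node in
 * its dparms before registering with rdmavt, e.g.:
 *
 *	rdi->dparms.lkey_table_size = 16;
 *	rdi->dparms.node = numa_node_id();
 *	ret = rvt_driver_mr_init(rdi);
 *	if (ret)
 *		return ret;
 *
 * In practice rvt_register_device() normally makes this call on the
 * driver's behalf during registration.
 */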

/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * Called when drivers have unregistered or perhaps failed to register with us.
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
	if (rdi->dma_mr)
		rvt_pr_err(rdi, "DMA MR not null!\n");

	vfree(rdi->lkey_table.table);
}

static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}

static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count)
{
	int m, i = 0;

	mr->mapsz = 0;
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i]) {
			rvt_deinit_mregion(mr);
			return -ENOMEM;
		}
		mr->mapsz++;
	}
	init_completion(&mr->comp);
	/* count the reference returned to the caller */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;
}
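
/*
 * Layout note: an rvt_mregion stores its page/segment list as a two
 * level table.  mr->map[] holds mapsz pointers, each to a block of
 * RVT_SEGSZ (vaddr, length) segments, so segment k lives at
 * mr->map[k / RVT_SEGSZ]->segs[k % RVT_SEGSZ].
 */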

/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Return: 0 if successful, otherwise -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-DMA regions.
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;

	rvt_get_mr(mr);
	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		} else {
			rvt_put_mr(mr);
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (!rcu_access_pointer(rkt->table[r]))
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero, which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped to ensure enough bits for generation number
	 */
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
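	/*
	 * Worked example (illustrative): with lkey_table_size == 16,
	 * the table index r lands in bits 31..16, the generation number
	 * is masked to 8 bits ((1 << (24 - 16)) - 1) and placed in bits
	 * 15..8, and bits 7..0 stay available to the user, matching the
	 * layout described at the top of rvt_driver_mr_init().
	 */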
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	rvt_put_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}

/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	int freed = 0;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0) {
		RCU_INIT_POINTER(dev->dma_mr, NULL);
	} else {
		r = lkey >> (32 - dev->dparms.lkey_table_size);
		RCU_INIT_POINTER(rkt->table[r], NULL);
	}
	mr->lkey_published = 0;
	freed++;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
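	/*
	 * If the key was unpublished above, wait for any RCU readers
	 * still using the old table entry to finish before dropping the
	 * reference taken by rvt_alloc_lkey().
	 */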
	if (freed) {
		synchronize_rcu();
		rvt_put_mr(mr);
	}
}

static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
	struct rvt_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = rvt_init_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * The verbs core will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = rvt_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

static void __rvt_free_mr(struct rvt_mr *mr)
{
	rvt_deinit_mregion(&mr->mr);
	rvt_free_lkey(&mr->mr);
	kfree(mr);	/* mr comes from kzalloc() in __rvt_alloc_mr() */
}

/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct rvt_mr *mr;
	struct ib_mr *ret;
	int rval;

	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = rvt_init_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = rvt_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}
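
/*
 * Usage sketch (illustrative): a kernel ULP does not call this entry
 * point directly; at this kernel revision it obtains the DMA MR through
 * the verbs layer, e.g.:
 *
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * The returned region uses the reserved key 0 handled by
 * rvt_alloc_lkey() above.
 */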

/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use as the region's iova
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct rvt_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	n = umem->nmap;

	mr = __rvt_alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail_umem;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail_inval;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;

bail_inval:
	__rvt_free_mr(mr);

bail_umem:
	ib_umem_release(umem);

	return ret;
}

/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Return: 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&mr->mr);

	rvt_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_pr_err(rdi,
			   "rvt_dereg_mr timeout mr %p pd %p refcount %u\n",
			   mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
		rvt_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/**
 * rvt_alloc_mr - Allocate a memory region usable with the
 *	IB_WR_REG_MR send work request
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct rvt_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __rvt_alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}
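
/*
 * Usage sketch (illustrative): the verbs core reaches this entry point
 * through ib_alloc_mr(); a ULP preparing for fast registration might do
 * (the segment count 256 is a made-up example value):
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 256);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */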

/**
 * rvt_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct rvt_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = rvt_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}

/**
 * rvt_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success
 */
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);

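	/*
	 * An idle FMR holds one reference from rvt_init_mregion() and one
	 * for its published lkey, so a refcount above 2 means the region
	 * is still in use and must not be remapped.
	 */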
	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	rkt = &rdi->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}

/**
 * rvt_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Return: 0 on success.
 */
int rvt_unmap_fmr(struct list_head *fmr_list)
{
	struct rvt_fmr *fmr;
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	struct rvt_dev_info *rdi;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rdi = ib_to_rvt(fmr->ibfmr.device);
		rkt = &rdi->lkey_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * rvt_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Return: 0 on success.
 */
int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&fmr->mr);
	rvt_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}
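
/*
 * Lifecycle sketch (illustrative): a kernel ULP drives the FMR verbs
 * above roughly as follows; the fmr_attr values are made up for the
 * example:
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages = 64,
 *		.max_maps = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_WRITE, &attr);
 *
 *	ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	...use fmr->rkey in RDMA operations...
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */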

/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Return: 1 if valid and successful, otherwise returns 0.
 *
 * Increments the reference count upon success.
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct ib_sge *sge, int acc)
{
	struct rvt_mregion *mr;
	unsigned int n, m;
	size_t off;
	struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr and dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary.  entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
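		/*
		 * Worked example (illustrative): with 4 KiB pages
		 * (page_shift == 12) and off == 5 * 4096 + 100,
		 * entries_spanned_by_off == 5, off becomes 100, and the
		 * target segment is map[5 / RVT_SEGSZ]->segs[5 % RVT_SEGSZ].
		 */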
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_lkey_ok);
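
/*
 * Usage sketch (illustrative, with hypothetical locals wqe and j): a
 * driver building its internal SGE state from a posted receive might
 * validate each entry like this:
 *
 *	struct rvt_sge isge;
 *
 *	if (!rvt_lkey_ok(rkt, pd, &isge, &wqe->sg_list[j],
 *			 IB_ACCESS_LOCAL_WRITE))
 *		goto bad_lkey;
 */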

/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return: 1 if successful, otherwise 0.
 *
 * Increments the reference count upon success.
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	unsigned int n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr and dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(rdi->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *)vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary.  entries_spanned_by_off is the number of
		 * times the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);
831