// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"
#include "trace.h"

/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
        unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
        unsigned lk_tab_size;
        int i;

        /*
         * The top lkey_table_size bits are used to index the
         * table.  The lower 8 bits can be owned by the user (copied from
         * the LKEY).  The remaining bits act as a generation number or tag.
         */
        if (!lkey_table_size)
                return -EINVAL;

        spin_lock_init(&rdi->lkey_table.lock);

        /* ensure generation is at least 4 bits */
        if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
                rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
                            lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
                rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
                lkey_table_size = rdi->dparms.lkey_table_size;
        }
        rdi->lkey_table.max = 1 << lkey_table_size;
        rdi->lkey_table.shift = 32 - lkey_table_size;
        lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
        rdi->lkey_table.table = (struct rvt_mregion __rcu **)
                               vmalloc_node(lk_tab_size, rdi->dparms.node);
        if (!rdi->lkey_table.table)
                return -ENOMEM;

        RCU_INIT_POINTER(rdi->dma_mr, NULL);
        for (i = 0; i < rdi->lkey_table.max; i++)
                RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

        rdi->dparms.props.max_mr = rdi->lkey_table.max;
        return 0;
}

/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * Called when drivers have unregistered or perhaps failed to register with us.
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
        if (rdi->dma_mr)
                rvt_pr_err(rdi, "DMA MR not null!\n");

        vfree(rdi->lkey_table.table);
}

static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
        int i = mr->mapsz;

        mr->mapsz = 0;
        while (i)
                kfree(mr->map[--i]);
        percpu_ref_exit(&mr->refcount);
}

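/*
 * percpu_ref release callback: runs once the last reference is dropped
 * after percpu_ref_kill(), waking anyone waiting on mr->comp.
 */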
static void __rvt_mregion_complete(struct percpu_ref *ref)
{
        struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
                                              refcount);

        complete(&mr->comp);
}

static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
                            int count, unsigned int percpu_flags)
{
        int m, i = 0;
        struct rvt_dev_info *dev = ib_to_rvt(pd->device);

        mr->mapsz = 0;
        m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
        for (; i < m; i++) {
                mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
                                          dev->dparms.node);
                if (!mr->map[i])
                        goto bail;
                mr->mapsz++;
        }
        init_completion(&mr->comp);
        /* count returning the ptr to user */
        if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
                            percpu_flags, GFP_KERNEL))
                goto bail;

        atomic_set(&mr->lkey_invalid, 0);
        mr->pd = pd;
        mr->max_segs = count;
        return 0;
bail:
        rvt_deinit_mregion(mr);
        return -ENOMEM;
}

/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 *
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
        unsigned long flags;
        u32 r;
        u32 n;
        int ret = 0;
        struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
        struct rvt_lkey_table *rkt = &dev->lkey_table;

        rvt_get_mr(mr);
        spin_lock_irqsave(&rkt->lock, flags);

        /* special case for dma_mr lkey == 0 */
        if (dma_region) {
                struct rvt_mregion *tmr;

                tmr = rcu_access_pointer(dev->dma_mr);
                if (!tmr) {
                        mr->lkey_published = 1;
                        /* Ensure published is written first */
                        rcu_assign_pointer(dev->dma_mr, mr);
                        rvt_get_mr(mr);
                }
                goto success;
        }

        /* Find the next available LKEY */
        r = rkt->next;
        n = r;
        for (;;) {
                if (!rcu_access_pointer(rkt->table[r]))
                        break;
                r = (r + 1) & (rkt->max - 1);
                if (r == n)
                        goto bail;
        }
        rkt->next = (r + 1) & (rkt->max - 1);
        /*
         * Make sure lkey is never zero which is reserved to indicate an
         * unrestricted LKEY.
         */
        rkt->gen++;
        /*
         * bits are capped to ensure enough bits for generation number
         */
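        /*
         * Resulting layout (illustrative, with lkey_table_size == 16):
         *   bits [31:16] table index r
         *   bits [15:8]  generation tag
         *   bits [7:0]   zero, left for the consumer
         */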
        mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
                ((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
                 << 8);
        if (mr->lkey == 0) {
                mr->lkey |= 1 << 8;
                rkt->gen++;
        }
        mr->lkey_published = 1;
        /* Ensure published is written first */
        rcu_assign_pointer(rkt->table[r], mr);
success:
        spin_unlock_irqrestore(&rkt->lock, flags);
out:
        return ret;
bail:
        rvt_put_mr(mr);
        spin_unlock_irqrestore(&rkt->lock, flags);
        ret = -ENOMEM;
        goto out;
}

/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
        unsigned long flags;
        u32 lkey = mr->lkey;
        u32 r;
        struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
        struct rvt_lkey_table *rkt = &dev->lkey_table;
        int freed = 0;

        spin_lock_irqsave(&rkt->lock, flags);
        if (!lkey) {
                if (mr->lkey_published) {
                        mr->lkey_published = 0;
                        /* ensure published is written before pointer */
                        rcu_assign_pointer(dev->dma_mr, NULL);
                        rvt_put_mr(mr);
                }
        } else {
                if (!mr->lkey_published)
                        goto out;
                r = lkey >> (32 - dev->dparms.lkey_table_size);
                mr->lkey_published = 0;
                /* ensure published is written before pointer */
                rcu_assign_pointer(rkt->table[r], NULL);
        }
        freed++;
out:
        spin_unlock_irqrestore(&rkt->lock, flags);
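        /*
         * Drop the initial reference; __rvt_mregion_complete() fires once
         * the last outstanding reference is put.
         */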
        if (freed)
                percpu_ref_kill(&mr->refcount);
}

static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
        struct rvt_mr *mr;
        int rval = -ENOMEM;
        int m;

        /* Allocate struct plus pointers to first level page tables. */
        m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
        mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL);
        if (!mr)
                goto bail;

        rval = rvt_init_mregion(&mr->mr, pd, count, 0);
        if (rval)
                goto bail;
        /*
         * ib_reg_phys_mr() will initialize mr->ibmr except for
         * lkey and rkey.
         */
        rval = rvt_alloc_lkey(&mr->mr, 0);
        if (rval)
                goto bail_mregion;
        mr->ibmr.lkey = mr->mr.lkey;
        mr->ibmr.rkey = mr->mr.lkey;
done:
        return mr;

bail_mregion:
        rvt_deinit_mregion(&mr->mr);
bail:
        kfree(mr);
        mr = ERR_PTR(rval);
        goto done;
}

static void __rvt_free_mr(struct rvt_mr *mr)
{
        rvt_free_lkey(&mr->mr);
        rvt_deinit_mregion(&mr->mr);
        kfree(mr);
}

/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct rvt_mr *mr;
        struct ib_mr *ret;
        int rval;

        if (ibpd_to_rvtpd(pd)->user)
                return ERR_PTR(-EPERM);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail;
        }

        rval = rvt_alloc_lkey(&mr->mr, 1);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail_mregion;
        }

        mr->mr.access_flags = acc;
        ret = &mr->ibmr;
done:
        return ret;

bail_mregion:
        rvt_deinit_mregion(&mr->mr);
bail:
        kfree(mr);
        goto done;
}

/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                              u64 virt_addr, int mr_access_flags,
                              struct ib_udata *udata)
{
        struct rvt_mr *mr;
        struct ib_umem *umem;
        struct sg_page_iter sg_iter;
        int n, m;
        struct ib_mr *ret;

        if (length == 0)
                return ERR_PTR(-EINVAL);

        umem = ib_umem_get(pd->device, start, length, mr_access_flags);
        if (IS_ERR(umem))
                return (void *)umem;

        n = ib_umem_num_pages(umem);

        mr = __rvt_alloc_mr(n, pd);
        if (IS_ERR(mr)) {
                ret = (struct ib_mr *)mr;
                goto bail_umem;
        }

        mr->mr.user_base = start;
        mr->mr.iova = virt_addr;
        mr->mr.length = length;
        mr->mr.offset = ib_umem_offset(umem);
        mr->mr.access_flags = mr_access_flags;
        mr->umem = umem;

        mr->mr.page_shift = PAGE_SHIFT;
        m = 0;
        n = 0;
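        /* Walk the pinned pages and record one rvt_seg per PAGE_SIZE chunk. */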
        for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
                void *vaddr;

                vaddr = page_address(sg_page_iter_page(&sg_iter));
                if (!vaddr) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail_inval;
                }
                mr->mr.map[m]->segs[n].vaddr = vaddr;
                mr->mr.map[m]->segs[n].length = PAGE_SIZE;
                trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
                if (++n == RVT_SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        return &mr->ibmr;

bail_inval:
        __rvt_free_mr(mr);

bail_umem:
        ib_umem_release(umem);

        return ret;
}

/**
 * rvt_dereg_clean_qp_cb - callback from iterator
 * @qp: the qp
 * @v: the mregion (as u64)
 *
 * This routine fields the callback for all QPs and,
 * for QPs in the same PD as the MR, calls
 * rvt_qp_mr_clean() to potentially clean up references.
 */
static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
{
        struct rvt_mregion *mr = (struct rvt_mregion *)v;

        /* skip PDs that are not ours */
        if (mr->pd != qp->ibqp.pd)
                return;
        rvt_qp_mr_clean(qp, mr->lkey);
}

/**
 * rvt_dereg_clean_qps - find QPs for reference cleanup
 * @mr: the MR that is being deregistered
 *
 * This routine iterates RC QPs looking for references
 * to the lkey noted in mr.
 */
static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
{
        struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

        rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
}

/**
 * rvt_check_refs - check references
 * @mr: the mregion
 * @t: the caller identification
 *
 * This routine checks whether an MR still holds references while
 * it is being de-registered.
 *
 * If the count is non-zero, the code calls a clean routine then
 * waits up to a timeout for the count to drop to zero.
 */
static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
{
        unsigned long timeout;
        struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

        if (mr->lkey) {
                /* avoid dma mr */
                rvt_dereg_clean_qps(mr);
                /* @mr was indexed on rcu protected @lkey_table */
                synchronize_rcu();
        }

        timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
        if (!timeout) {
                rvt_pr_err(rdi,
                           "%s timeout mr %p pd %p lkey %x refcount %ld\n",
                           t, mr, mr->pd, mr->lkey,
                           atomic_long_read(&mr->refcount.data->count));
                rvt_get_mr(mr);
                return -EBUSY;
        }
        return 0;
}

/**
 * rvt_mr_has_lkey - does the MR match the lkey
 * @mr: the mregion
 * @lkey: the lkey
 */
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
{
        return mr && lkey == mr->lkey;
}

/**
 * rvt_ss_has_lkey - is the lkey referenced by the sge state
 * @ss: the sge state
 * @lkey: the lkey
 *
 * This code tests for an MR with the indicated lkey in the
 * given sge state.
 */
bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
{
        int i;
        bool rval = false;

        if (!ss->num_sge)
                return rval;
        /* first one */
        rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
        /* any others */
        for (i = 0; !rval && i < ss->num_sge - 1; i++)
                rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
        return rval;
}

/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 * @udata: unused by the driver
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Returns 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct rvt_mr *mr = to_imr(ibmr);
        int ret;

        rvt_free_lkey(&mr->mr);

        rvt_put_mr(&mr->mr); /* will set completion if last */
        ret = rvt_check_refs(&mr->mr, __func__);
        if (ret)
                goto out;
        rvt_deinit_mregion(&mr->mr);
        ib_umem_release(mr->umem);
        kfree(mr);
out:
        return ret;
}

/**
 * rvt_alloc_mr - Allocate a memory region usable for fast registration
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                           u32 max_num_sg)
{
        struct rvt_mr *mr;

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        mr = __rvt_alloc_mr(max_num_sg, pd);
        if (IS_ERR(mr))
                return (struct ib_mr *)mr;

        return &mr->ibmr;
}

/**
 * rvt_set_page - page assignment function called by ib_sg_to_pages
 * @ibmr: memory region
 * @addr: dma address of mapped page
 *
 * Return: 0 on success
 */
static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct rvt_mr *mr = to_imr(ibmr);
        u32 ps = 1 << mr->mr.page_shift;
        u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
        int m, n;

        if (unlikely(mapped_segs == mr->mr.max_segs))
                return -ENOMEM;

        m = mapped_segs / RVT_SEGSZ;
        n = mapped_segs % RVT_SEGSZ;
        mr->mr.map[m]->segs[n].vaddr = (void *)addr;
        mr->mr.map[m]->segs[n].length = ps;
        mr->mr.length += ps;
        trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);

        return 0;
}

/**
 * rvt_map_mr_sg - map sg list and set it in the memory region
 * @ibmr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 *
 * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
 *
 * Return: number of sg elements mapped to the memory region
 */
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                  int sg_nents, unsigned int *sg_offset)
{
        struct rvt_mr *mr = to_imr(ibmr);
        int ret;

        mr->mr.length = 0;
        mr->mr.page_shift = PAGE_SHIFT;
        ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
        mr->mr.user_base = ibmr->iova;
        mr->mr.iova = ibmr->iova;
        mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
        mr->mr.length = (size_t)ibmr->length;
        trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset);
        return ret;
}

/**
 * rvt_fast_reg_mr - fast register physical MR
 * @qp: the queue pair where the work request comes from
 * @ibmr: the memory region to be registered
 * @key: updated key for this memory region
 * @access: access flags for this memory region
 *
 * Returns 0 on success.
 */
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
                    int access)
{
        struct rvt_mr *mr = to_imr(ibmr);

        if (qp->ibqp.pd != mr->mr.pd)
                return -EACCES;

        /* not applicable to dma MR or user MR */
        if (!mr->mr.lkey || mr->umem)
                return -EINVAL;

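        /* Only the low 8 user-owned bits of the key may change. */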
        if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
                return -EINVAL;

        ibmr->lkey = key;
        ibmr->rkey = key;
        mr->mr.lkey = key;
        mr->mr.access_flags = access;
        mr->mr.iova = ibmr->iova;
        atomic_set(&mr->mr.lkey_invalid, 0);

        return 0;
}
EXPORT_SYMBOL(rvt_fast_reg_mr);

/**
 * rvt_invalidate_rkey - invalidate an MR rkey
 * @qp: queue pair associated with the invalidate op
 * @rkey: rkey to invalidate
 *
 * Returns 0 on success.
 */
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
{
        struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
        struct rvt_lkey_table *rkt = &dev->lkey_table;
        struct rvt_mregion *mr;

        if (rkey == 0)
                return -EINVAL;

        rcu_read_lock();
        mr = rcu_dereference(
                rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
        if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
                goto bail;

        atomic_set(&mr->lkey_invalid, 1);
        rcu_read_unlock();
        return 0;

bail:
        rcu_read_unlock();
        return -EINVAL;
}
EXPORT_SYMBOL(rvt_invalidate_rkey);

/**
 * rvt_sge_adjacent - is isge compressible
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 *
 * If adjacent, will update last_sge to add the length.
 *
 * Return: true if isge is adjacent to last sge
 */
static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
                                    struct ib_sge *sge)
{
        if (last_sge && sge->lkey == last_sge->mr->lkey &&
            ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
                if (sge->lkey) {
                        if (unlikely((sge->addr - last_sge->mr->user_base +
                              sge->length > last_sge->mr->length)))
                                return false; /* overrun, caller will catch */
                } else {
                        last_sge->length += sge->length;
                }
                last_sge->sge_length += sge->length;
                trace_rvt_sge_adjacent(last_sge, sge);
                return true;
        }
        return false;
}

/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Increments the reference count when a new sge is stored.
 *
 * Return: 0 if compressed, 1 if added, otherwise returns -errno.
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
                struct rvt_sge *isge, struct rvt_sge *last_sge,
                struct ib_sge *sge, int acc)
{
        struct rvt_mregion *mr;
        unsigned n, m;
        size_t off;

        /*
         * We use LKEY == zero for kernel virtual addresses
         * (see rvt_get_dma_mr()).
         */
        if (sge->lkey == 0) {
                struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

                if (pd->user)
                        return -EINVAL;
                if (rvt_sge_adjacent(last_sge, sge))
                        return 0;
                rcu_read_lock();
                mr = rcu_dereference(dev->dma_mr);
                if (!mr)
                        goto bail;
                rvt_get_mr(mr);
                rcu_read_unlock();

                isge->mr = mr;
                isge->vaddr = (void *)sge->addr;
                isge->length = sge->length;
                isge->sge_length = sge->length;
                isge->m = 0;
                isge->n = 0;
                goto ok;
        }
        if (rvt_sge_adjacent(last_sge, sge))
                return 0;
        rcu_read_lock();
        mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
        if (!mr)
                goto bail;
        rvt_get_mr(mr);
        if (!READ_ONCE(mr->lkey_published))
                goto bail_unref;

        if (unlikely(atomic_read(&mr->lkey_invalid) ||
                     mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
                goto bail_unref;

        off = sge->addr - mr->user_base;
        if (unlikely(sge->addr < mr->user_base ||
                     off + sge->length > mr->length ||
                     (mr->access_flags & acc) != acc))
                goto bail_unref;
        rcu_read_unlock();

        off += mr->offset;
        if (mr->page_shift) {
                /*
                 * page sizes are a uniform power of 2, so no loop is
                 * necessary; entries_spanned_by_off is the number of times
                 * the loop below would have executed.
                 */
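                /*
                 * For example, with 4K pages and off = 3 * 4096 + 100,
                 * entries_spanned_by_off is 3 and the residual off is 100,
                 * so m/n select the fourth segment.
                 */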
                size_t entries_spanned_by_off;

                entries_spanned_by_off = off >> mr->page_shift;
                off -= (entries_spanned_by_off << mr->page_shift);
                m = entries_spanned_by_off / RVT_SEGSZ;
                n = entries_spanned_by_off % RVT_SEGSZ;
        } else {
                m = 0;
                n = 0;
                while (off >= mr->map[m]->segs[n].length) {
                        off -= mr->map[m]->segs[n].length;
                        n++;
                        if (n >= RVT_SEGSZ) {
                                m++;
                                n = 0;
                        }
                }
        }
        isge->mr = mr;
        isge->vaddr = mr->map[m]->segs[n].vaddr + off;
        isge->length = mr->map[m]->segs[n].length - off;
        isge->sge_length = sge->length;
        isge->m = m;
        isge->n = n;
ok:
        trace_rvt_sge_new(isge, sge);
        return 1;
bail_unref:
        rvt_put_mr(mr);
bail:
        rcu_read_unlock();
        return -EINVAL;
}
EXPORT_SYMBOL(rvt_lkey_ok);

/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return: 1 if successful, otherwise 0.
 *
 * Increments the reference count upon success.
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
                u32 len, u64 vaddr, u32 rkey, int acc)
{
        struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
        struct rvt_lkey_table *rkt = &dev->lkey_table;
        struct rvt_mregion *mr;
        unsigned n, m;
        size_t off;

        /*
         * We use RKEY == zero for kernel virtual addresses
         * (see rvt_get_dma_mr()).
         */
        rcu_read_lock();
        if (rkey == 0) {
                struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
                struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

                if (pd->user)
                        goto bail;
                mr = rcu_dereference(rdi->dma_mr);
                if (!mr)
                        goto bail;
                rvt_get_mr(mr);
                rcu_read_unlock();

                sge->mr = mr;
                sge->vaddr = (void *)vaddr;
                sge->length = len;
                sge->sge_length = len;
                sge->m = 0;
                sge->n = 0;
                goto ok;
        }

        mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
        if (!mr)
                goto bail;
        rvt_get_mr(mr);
        /* ensure mr read is before test */
        if (!READ_ONCE(mr->lkey_published))
                goto bail_unref;
        if (unlikely(atomic_read(&mr->lkey_invalid) ||
                     mr->lkey != rkey || qp->ibqp.pd != mr->pd))
                goto bail_unref;

        off = vaddr - mr->iova;
        if (unlikely(vaddr < mr->iova || off + len > mr->length ||
                     (mr->access_flags & acc) == 0))
                goto bail_unref;
        rcu_read_unlock();

        off += mr->offset;
        if (mr->page_shift) {
                /*
                 * page sizes are a uniform power of 2, so no loop is
                 * necessary; entries_spanned_by_off is the number of times
                 * the loop below would have executed.
                 */
                size_t entries_spanned_by_off;

                entries_spanned_by_off = off >> mr->page_shift;
                off -= (entries_spanned_by_off << mr->page_shift);
                m = entries_spanned_by_off / RVT_SEGSZ;
                n = entries_spanned_by_off % RVT_SEGSZ;
        } else {
                m = 0;
                n = 0;
                while (off >= mr->map[m]->segs[n].length) {
                        off -= mr->map[m]->segs[n].length;
                        n++;
                        if (n >= RVT_SEGSZ) {
                                m++;
                                n = 0;
                        }
                }
        }
        sge->mr = mr;
        sge->vaddr = mr->map[m]->segs[n].vaddr + off;
        sge->length = mr->map[m]->segs[n].length - off;
        sge->sge_length = len;
        sge->m = m;
        sge->n = n;
ok:
        return 1;
bail_unref:
        rvt_put_mr(mr);
bail:
        rcu_read_unlock();
        return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);