/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

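/**
 * rxe_mmap_release - kref release method for struct rxe_mmap_info
 * @ref: the embedded kref of the rxe_mmap_info being freed
 *
 * Runs when the last reference is dropped: unlinks the entry from the
 * device's pending_mmaps list if it is still queued, then frees the
 * object buffer and the info itself.
 */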
void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);		/* buf */
	kfree(ip);
}

/*
 * rxe_vma_open() and rxe_vma_close() track how many times the memory
 * region is mapped, so that it is not released while a mapping still
 * exists (e.g. fork() duplicates the VMA and calls ->open()).
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow a mmap larger than the object. */
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
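	/* the initial mmap does not invoke vm_ops->open(), take the ref here */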
	rxe_vma_open(vma);
done:
	return ret;
}

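/*
 * Illustrative only (not part of the driver): userspace is expected to
 * map a queue buffer using the offset and size handed back in struct
 * mminfo by the create verb, roughly:
 *
 *	buf = mmap(NULL, mi.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   cmd_fd, mi.offset);
 *
 * which lands in rxe_mmap() above and is matched against the pending
 * list by (context, offset).
 */
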
/*
 * Allocate the mmap information that a later call to rxe_mmap() will
 * match against.
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	spin_lock_bh(&rxe->mmap_offset_lock);

	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;
	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
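
/*
 * Typical caller pattern, roughly as in do_mmap_info() in rxe_queue.c:
 * create the info, copy ip->info back to userspace, then queue the
 * entry so that the subsequent mmap() can find it:
 *
 *	ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
 *	if (IS_ERR(ip))
 *		return PTR_ERR(ip);
 *
 *	if (copy_to_user(outbuf, &ip->info, sizeof(ip->info)))
 *		goto err;
 *
 *	spin_lock_bh(&rxe->pending_lock);
 *	list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
 *	spin_unlock_bh(&rxe->pending_lock);
 */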