/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"

/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}
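
/*
 * Usage sketch (assumption, not part of this driver): the verbs core
 * reaches this entry point through the device's get_dma_mr hook rather
 * than by a direct call, e.g. when a ULP allocates a PD on a device
 * that does not advertise IB_DEVICE_LOCAL_DMA_LKEY:
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *
 * pd->local_dma_lkey is then backed by the MR created here.
 */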

/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

	umem = ib_umem_get(udata, start, length, access_flags, 0);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

	if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 umem->npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

	ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = umem->npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}
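
/*
 * Usage sketch (assumption): userspace drives this verb through the
 * uverbs REG_MR path; with libibverbs the equivalent call is:
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *
 * where buf and len describe the caller's buffer; start and virt_addr
 * above both carry that userspace address in this case.
 */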

/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}
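
/*
 * Usage sketch (assumption): kernel ULPs obtain this fast-registration
 * MR through the core verb:
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *
 * and later map a scatterlist into it with ib_map_mr_sg(), which lands
 * in pvrdma_map_mr_sg() below.
 */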

/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 *
 * @return: 0 on success.
 */
int pvrdma_dereg_mr(struct ib_mr *ibmr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}
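
/*
 * Note: the local page directory, umem and MR structure are released
 * even if the DESTROY_MR device command fails; the failure is only
 * logged and 0 is returned to the ib_dereg_mr() caller.
 */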
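
/* ib_sg_to_pages() callback: records one page address per invocation. */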
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}
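
/**
 * pvrdma_map_mr_sg - map a scatterlist into a memory region
 * @ibmr: memory region
 * @sg: scatterlist to map
 * @sg_nents: number of entries in @sg
 * @sg_offset: offset in bytes into @sg, updated on return
 *
 * @return: number of mapped sg elements on success, otherwise a
 * negative errno.
 */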
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}
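
/*
 * Flow sketch (assumption): a ULP maps a DMA-mapped scatterlist into
 * the MR and then posts a registration work request referencing it:
 *
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		goto err;
 *
 * followed by an IB_WR_REG_MR work request carrying the MR, its rkey
 * and access flags.
 */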