xref: /openbmc/linux/drivers/infiniband/hw/hns/hns_roce_mr.c (revision 9144f784f852f9a125cabe9927b986d909bfa439)
1 /*
2  * Copyright (c) 2016 Hisilicon Limited.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/vmalloc.h>
35 #include <rdma/ib_umem.h>
36 #include <linux/math.h>
37 #include "hns_roce_device.h"
38 #include "hns_roce_cmd.h"
39 #include "hns_roce_hem.h"
40 
41 static u32 hw_index_to_key(int ind)
42 {
43 	return ((u32)ind >> 24) | ((u32)ind << 8);
44 }
45 
46 unsigned long key_to_hw_index(u32 key)
47 {
48 	return (key << 24) | (key >> 8);
49 }
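
/*
 * For illustration: the two helpers above are inverse 32-bit rotations by
 * eight bits, e.g. MPT index 0x5 becomes key 0x500 and key_to_hw_index(0x500)
 * yields 0x5 again.
 */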
50 
51 static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
52 {
53 	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
54 	struct ib_device *ibdev = &hr_dev->ib_dev;
55 	int err;
56 	int id;
57 
58 	/* Allocate a key for mr from mr_table */
59 	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
60 			     GFP_KERNEL);
61 	if (id < 0) {
62 		ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
63 		return -ENOMEM;
64 	}
65 
66 	mr->key = hw_index_to_key(id); /* MR key */
67 
68 	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
69 				 (unsigned long)id);
70 	if (err) {
71 		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
72 		goto err_free_bitmap;
73 	}
74 
75 	return 0;
76 err_free_bitmap:
77 	ida_free(&mtpt_ida->ida, id);
78 	return err;
79 }
80 
81 static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
82 {
83 	unsigned long obj = key_to_hw_index(mr->key);
84 
85 	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
86 	ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
87 }
88 
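/*
 * Illustrative summary: with typical caps (e.g. pbl_buf_pg_sz == 0 and
 * pbl_ba_pg_sz == 0) a normal MR gets a PBL built from PAGE_SIZE pages with
 * caps.pbl_hop_num levels of BA tables, while a fast MR (FRMR) always uses a
 * single hop and only reserves MTT space (mtt_only), since its pages are
 * filled in later by hns_roce_map_mr_sg().
 */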
89 static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
90 			struct ib_udata *udata, u64 start)
91 {
92 	struct ib_device *ibdev = &hr_dev->ib_dev;
93 	bool is_fast = mr->type == MR_TYPE_FRMR;
94 	struct hns_roce_buf_attr buf_attr = {};
95 	int err;
96 
97 	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
98 	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
99 			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
100 	buf_attr.region[0].size = mr->size;
101 	buf_attr.region[0].hopnum = mr->pbl_hop_num;
102 	buf_attr.region_count = 1;
103 	buf_attr.user_access = mr->access;
104 	/* a fast MR's buffer is allocated before mapping, not at creation */
105 	buf_attr.mtt_only = is_fast;
106 
107 	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
108 				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
109 				  udata, start);
110 	if (err)
111 		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
112 	else
113 		mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
114 
115 	return err;
116 }
117 
118 static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
119 {
120 	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
121 }
122 
123 static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
124 {
125 	struct ib_device *ibdev = &hr_dev->ib_dev;
126 	int ret;
127 
128 	if (mr->enabled) {
129 		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
130 					      key_to_hw_index(mr->key) &
131 					      (hr_dev->caps.num_mtpts - 1));
132 		if (ret)
133 			ibdev_warn_ratelimited(ibdev, "failed to destroy mpt, ret = %d.\n",
134 					       ret);
135 	}
136 
137 	free_mr_pbl(hr_dev, mr);
138 	free_mr_key(hr_dev, mr);
139 }
140 
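/*
 * Rough flow of MPT programming: the MPT context is first written into a
 * mailbox buffer by the hw-specific write_mtpt()/frmr_write_mtpt() callback
 * and then committed to hardware with the CREATE_MPT mailbox command, using
 * key_to_hw_index(mr->key) masked by (caps.num_mtpts - 1) as the MPT index.
 */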
141 static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
142 			      struct hns_roce_mr *mr)
143 {
144 	unsigned long mtpt_idx = key_to_hw_index(mr->key);
145 	struct hns_roce_cmd_mailbox *mailbox;
146 	struct device *dev = hr_dev->dev;
147 	int ret;
148 
149 	/* Allocate mailbox memory */
150 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
151 	if (IS_ERR(mailbox))
152 		return PTR_ERR(mailbox);
153 
154 	if (mr->type != MR_TYPE_FRMR)
155 		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
156 	else
157 		ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
158 	if (ret) {
159 		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
160 		goto err_page;
161 	}
162 
163 	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
164 				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
165 	if (ret) {
166 		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
167 		goto err_page;
168 	}
169 
170 	mr->enabled = 1;
171 
172 err_page:
173 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
174 
175 	return ret;
176 }
177 
178 void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
179 {
180 	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
181 
182 	ida_init(&mtpt_ida->ida);
183 	mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
184 	mtpt_ida->min = hr_dev->caps.reserved_mrws;
185 }
186 
187 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
188 {
189 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
190 	struct hns_roce_mr *mr;
191 	int ret;
192 
193 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
194 	if (!mr)
195 		return  ERR_PTR(-ENOMEM);
196 
197 	mr->type = MR_TYPE_DMA;
198 	mr->pd = to_hr_pd(pd)->pdn;
199 	mr->access = acc;
200 
201 	/* Allocate memory region key */
202 	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
203 	ret = alloc_mr_key(hr_dev, mr);
204 	if (ret)
205 		goto err_free;
206 
207 	ret = hns_roce_mr_enable(hr_dev, mr);
208 	if (ret)
209 		goto err_mr;
210 
211 	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
212 
213 	return &mr->ibmr;
214 err_mr:
215 	free_mr_key(hr_dev, mr);
216 
217 err_free:
218 	kfree(mr);
219 	return ERR_PTR(ret);
220 }
221 
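/*
 * Sketch of the user MR registration sequence used below: alloc_mr_key()
 * reserves an MPT entry and derives the lkey/rkey, alloc_mr_pbl() pins the
 * user buffer and builds the page tables, and hns_roce_mr_enable() writes
 * the MPT context to hardware; each step is unwound in reverse on failure.
 */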
222 struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
223 				   u64 virt_addr, int access_flags,
224 				   struct ib_udata *udata)
225 {
226 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
227 	struct hns_roce_mr *mr;
228 	int ret;
229 
230 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
231 	if (!mr)
232 		return ERR_PTR(-ENOMEM);
233 
234 	mr->iova = virt_addr;
235 	mr->size = length;
236 	mr->pd = to_hr_pd(pd)->pdn;
237 	mr->access = access_flags;
238 	mr->type = MR_TYPE_MR;
239 
240 	ret = alloc_mr_key(hr_dev, mr);
241 	if (ret)
242 		goto err_alloc_mr;
243 
244 	ret = alloc_mr_pbl(hr_dev, mr, udata, start);
245 	if (ret)
246 		goto err_alloc_key;
247 
248 	ret = hns_roce_mr_enable(hr_dev, mr);
249 	if (ret)
250 		goto err_alloc_pbl;
251 
252 	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
253 
254 	return &mr->ibmr;
255 
256 err_alloc_pbl:
257 	free_mr_pbl(hr_dev, mr);
258 err_alloc_key:
259 	free_mr_key(hr_dev, mr);
260 err_alloc_mr:
261 	kfree(mr);
262 	return ERR_PTR(ret);
263 }
264 
265 struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
266 				     u64 length, u64 virt_addr,
267 				     int mr_access_flags, struct ib_pd *pd,
268 				     struct ib_udata *udata)
269 {
270 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
271 	struct ib_device *ib_dev = &hr_dev->ib_dev;
272 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
273 	struct hns_roce_cmd_mailbox *mailbox;
274 	unsigned long mtpt_idx;
275 	int ret;
276 
277 	if (!mr->enabled)
278 		return ERR_PTR(-EINVAL);
279 
280 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
281 	if (IS_ERR(mailbox))
282 		return ERR_CAST(mailbox);
283 
284 	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
285 
286 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
287 				mtpt_idx);
288 	if (ret)
289 		goto free_cmd_mbox;
290 
291 	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
292 				      mtpt_idx);
293 	if (ret)
294 		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
295 
296 	mr->enabled = 0;
297 	mr->iova = virt_addr;
298 	mr->size = length;
299 
300 	if (flags & IB_MR_REREG_PD)
301 		mr->pd = to_hr_pd(pd)->pdn;
302 
303 	if (flags & IB_MR_REREG_ACCESS)
304 		mr->access = mr_access_flags;
305 
306 	if (flags & IB_MR_REREG_TRANS) {
307 		free_mr_pbl(hr_dev, mr);
308 		ret = alloc_mr_pbl(hr_dev, mr, udata, start);
309 		if (ret) {
310 			ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
311 				  ret);
312 			goto free_cmd_mbox;
313 		}
314 	}
315 
316 	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
317 	if (ret) {
318 		ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
319 		goto free_cmd_mbox;
320 	}
321 
322 	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
323 				     mtpt_idx);
324 	if (ret) {
325 		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
326 		goto free_cmd_mbox;
327 	}
328 
329 	mr->enabled = 1;
330 
331 free_cmd_mbox:
332 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
333 
334 	if (ret)
335 		return ERR_PTR(ret);
336 	return NULL;
337 }
338 
339 int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
340 {
341 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
342 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
343 
344 	if (hr_dev->hw->dereg_mr)
345 		hr_dev->hw->dereg_mr(hr_dev);
346 
347 	hns_roce_mr_free(hr_dev, mr);
348 	kfree(mr);
349 
350 	return 0;
351 }
352 
353 struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
354 				u32 max_num_sg)
355 {
356 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
357 	struct device *dev = hr_dev->dev;
358 	struct hns_roce_mr *mr;
359 	int ret;
360 
361 	if (mr_type != IB_MR_TYPE_MEM_REG)
362 		return ERR_PTR(-EINVAL);
363 
364 	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
365 		dev_err(dev, "max_num_sg larger than %d\n",
366 			HNS_ROCE_FRMR_MAX_PA);
367 		return ERR_PTR(-EINVAL);
368 	}
369 
370 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
371 	if (!mr)
372 		return ERR_PTR(-ENOMEM);
373 
374 	mr->type = MR_TYPE_FRMR;
375 	mr->pd = to_hr_pd(pd)->pdn;
376 	mr->size = max_num_sg * (1 << PAGE_SHIFT);
377 
378 	/* Allocate memory region key */
379 	ret = alloc_mr_key(hr_dev, mr);
380 	if (ret)
381 		goto err_free;
382 
383 	ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
384 	if (ret)
385 		goto err_key;
386 
387 	ret = hns_roce_mr_enable(hr_dev, mr);
388 	if (ret)
389 		goto err_pbl;
390 
391 	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
392 	mr->ibmr.length = mr->size;
393 
394 	return &mr->ibmr;
395 
396 err_pbl:
397 	free_mr_pbl(hr_dev, mr);
398 err_key:
399 	free_mr_key(hr_dev, mr);
400 err_free:
401 	kfree(mr);
402 	return ERR_PTR(ret);
403 }
404 
405 static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
406 {
407 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
408 
409 	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
410 		mr->page_list[mr->npages++] = addr;
411 		return 0;
412 	}
413 
414 	return -ENOBUFS;
415 }
416 
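/*
 * Illustrative note: hns_roce_set_page() is the per-page callback passed to
 * ib_sg_to_pages() below; it fills mr->page_list until the PBL capacity
 * (pbl_mtr.hem_cfg.buf_pg_count) is reached, and the collected addresses are
 * then written to the MTT with hns_roce_mtr_map().
 */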
417 int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
418 		       unsigned int *sg_offset_p)
419 {
420 	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
421 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
422 	struct ib_device *ibdev = &hr_dev->ib_dev;
423 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
424 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
425 	int ret, sg_num = 0;
426 
427 	if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
428 	    ibmr->page_size < HNS_HW_PAGE_SIZE ||
429 	    ibmr->page_size > HNS_HW_MAX_PAGE_SIZE)
430 		return sg_num;
431 
432 	mr->npages = 0;
433 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
434 				 sizeof(dma_addr_t), GFP_KERNEL);
435 	if (!mr->page_list)
436 		return sg_num;
437 
438 	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page);
439 	if (sg_num < 1) {
440 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
441 			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
442 		goto err_page_list;
443 	}
444 
445 	mtr->hem_cfg.region[0].offset = 0;
446 	mtr->hem_cfg.region[0].count = mr->npages;
447 	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
448 	mtr->hem_cfg.region_count = 1;
449 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
450 	if (ret) {
451 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
452 		sg_num = 0;
453 	} else {
454 		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
455 	}
456 
457 err_page_list:
458 	kvfree(mr->page_list);
459 	mr->page_list = NULL;
460 
461 	return sg_num;
462 }
463 
464 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
465 			     struct hns_roce_mw *mw)
466 {
467 	struct device *dev = hr_dev->dev;
468 	int ret;
469 
470 	if (mw->enabled) {
471 		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
472 					      key_to_hw_index(mw->rkey) &
473 					      (hr_dev->caps.num_mtpts - 1));
474 		if (ret)
475 			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
476 
477 		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
478 				   key_to_hw_index(mw->rkey));
479 	}
480 
481 	ida_free(&hr_dev->mr_table.mtpt_ida.ida,
482 		 (int)key_to_hw_index(mw->rkey));
483 }
484 
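/*
 * Sketch of MW enablement: an MPT (HEM) entry is reserved with
 * hns_roce_table_get(), the MW context is written into a mailbox buffer by
 * the hw mw_write_mtpt() callback, and CREATE_MPT commits it to hardware;
 * failures roll back the mailbox and the HEM entry.
 */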
485 static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
486 			      struct hns_roce_mw *mw)
487 {
488 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
489 	struct hns_roce_cmd_mailbox *mailbox;
490 	struct device *dev = hr_dev->dev;
491 	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
492 	int ret;
493 
494 	/* prepare HEM entry memory */
495 	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
496 	if (ret)
497 		return ret;
498 
499 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
500 	if (IS_ERR(mailbox)) {
501 		ret = PTR_ERR(mailbox);
502 		goto err_table;
503 	}
504 
505 	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
506 	if (ret) {
507 		dev_err(dev, "MW write mtpt fail!\n");
508 		goto err_page;
509 	}
510 
511 	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
512 				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
513 	if (ret) {
514 		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
515 		goto err_page;
516 	}
517 
518 	mw->enabled = 1;
519 
520 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
521 
522 	return 0;
523 
524 err_page:
525 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
526 
527 err_table:
528 	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
529 
530 	return ret;
531 }
532 
533 int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
534 {
535 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
536 	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
537 	struct ib_device *ibdev = &hr_dev->ib_dev;
538 	struct hns_roce_mw *mw = to_hr_mw(ibmw);
539 	int ret;
540 	int id;
541 
542 	/* Allocate a key for mw from mr_table */
543 	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
544 			     GFP_KERNEL);
545 	if (id < 0) {
546 		ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
547 		return -ENOMEM;
548 	}
549 
550 	mw->rkey = hw_index_to_key(id);
551 
552 	ibmw->rkey = mw->rkey;
553 	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
554 	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
555 	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
556 	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
557 
558 	ret = hns_roce_mw_enable(hr_dev, mw);
559 	if (ret)
560 		goto err_mw;
561 
562 	return 0;
563 
564 err_mw:
565 	hns_roce_mw_free(hr_dev, mw);
566 	return ret;
567 }
568 
569 int hns_roce_dealloc_mw(struct ib_mw *ibmw)
570 {
571 	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
572 	struct hns_roce_mw *mw = to_hr_mw(ibmw);
573 
574 	hns_roce_mw_free(hr_dev, mw);
575 	return 0;
576 }
577 
578 static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
579 			  struct hns_roce_buf_region *region, dma_addr_t *pages,
580 			  int max_count)
581 {
582 	int count, npage;
583 	int offset, end;
584 	__le64 *mtts;
585 	u64 addr;
586 	int i;
587 
588 	offset = region->offset;
589 	end = offset + region->count;
590 	npage = 0;
591 	while (offset < end && npage < max_count) {
592 		count = 0;
593 		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
594 						  offset, &count);
595 		if (!mtts)
596 			return -ENOBUFS;
597 
598 		for (i = 0; i < count && npage < max_count; i++) {
599 			addr = pages[npage];
600 
601 			mtts[i] = cpu_to_le64(addr);
602 			npage++;
603 		}
604 		offset += count;
605 	}
606 
607 	return npage;
608 }
609 
610 static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
611 {
612 	int i;
613 
614 	for (i = 0; i < attr->region_count; i++)
615 		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
616 		    attr->region[i].hopnum > 0)
617 			return true;
618 
619 	/* Because the mtr has only one root base address, a hopnum of 0 means
620 	 * the root base address equals the first buffer address, so all the
621 	 * allocated memory must lie in one continuous space accessed in direct mode.
622 	 */
623 	return false;
624 }
625 
626 static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
627 {
628 	size_t size = 0;
629 	int i;
630 
631 	for (i = 0; i < attr->region_count; i++)
632 		size += attr->region[i].size;
633 
634 	return size;
635 }
636 
637 /*
638  * Check whether the given pages lie in a continuous address space.
639  * Returns 0 on success, or the index of the first non-contiguous page.
640  */
641 static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
642 					 unsigned int page_shift)
643 {
644 	size_t page_size = 1 << page_shift;
645 	int i;
646 
647 	for (i = 1; i < page_count; i++)
648 		if (pages[i] - pages[i - 1] != page_size)
649 			return i;
650 
651 	return 0;
652 }
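
/*
 * For example, with page_shift 12 the array { 0x1000, 0x2000, 0x4000 } fails
 * the check above and mtr_check_direct_pages() returns 2, the index of the
 * first page that does not follow its predecessor contiguously.
 */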
653 
654 static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
655 {
656 	/* release user buffers */
657 	if (mtr->umem) {
658 		ib_umem_release(mtr->umem);
659 		mtr->umem = NULL;
660 	}
661 
662 	/* release kernel buffers */
663 	if (mtr->kmem) {
664 		hns_roce_buf_free(hr_dev, mtr->kmem);
665 		mtr->kmem = NULL;
666 	}
667 }
668 
669 static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
670 			  struct hns_roce_buf_attr *buf_attr,
671 			  struct ib_udata *udata, unsigned long user_addr)
672 {
673 	struct ib_device *ibdev = &hr_dev->ib_dev;
674 	size_t total_size;
675 
676 	total_size = mtr_bufs_size(buf_attr);
677 
678 	if (udata) {
679 		mtr->kmem = NULL;
680 		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
681 					buf_attr->user_access);
682 		if (IS_ERR_OR_NULL(mtr->umem)) {
683 			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
684 				  PTR_ERR(mtr->umem));
685 			return -ENOMEM;
686 		}
687 	} else {
688 		mtr->umem = NULL;
689 		mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
690 					       buf_attr->page_shift,
691 					       mtr->hem_cfg.is_direct ?
692 					       HNS_ROCE_BUF_DIRECT : 0);
693 		if (IS_ERR(mtr->kmem)) {
694 			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
695 				  PTR_ERR(mtr->kmem));
696 			return PTR_ERR(mtr->kmem);
697 		}
698 	}
699 
700 	return 0;
701 }
702 
703 static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
704 			int page_count, unsigned int page_shift)
705 {
706 	struct ib_device *ibdev = &hr_dev->ib_dev;
707 	dma_addr_t *pages;
708 	int npage;
709 	int ret;
710 
711 	/* allocate a temporary array to store the buffer's DMA addresses */
712 	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
713 	if (!pages)
714 		return -ENOMEM;
715 
716 	if (mtr->umem)
717 		npage = hns_roce_get_umem_bufs(pages, page_count,
718 					       mtr->umem, page_shift);
719 	else
720 		npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
721 					       mtr->kmem, page_shift);
722 
723 	if (npage != page_count) {
724 		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
725 			  page_count);
726 		ret = -ENOBUFS;
727 		goto err_alloc_list;
728 	}
729 
730 	if (mtr->hem_cfg.is_direct && npage > 1) {
731 		ret = mtr_check_direct_pages(pages, npage, page_shift);
732 		if (ret) {
733 			ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
734 				  mtr->umem ? "umtr" : "kmtr", ret, npage);
735 			ret = -ENOBUFS;
736 			goto err_alloc_list;
737 		}
738 	}
739 
740 	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
741 	if (ret)
742 		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
743 
744 err_alloc_list:
745 	kvfree(pages);
746 
747 	return ret;
748 }
749 
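/*
 * Simplified example of the mapping below: with two regions
 * { offset = 0, count = 4 } and { offset = 4, count = 8 } and a 12-entry
 * page array, each region's slice of the array is written into the MTT
 * entries found by hns_roce_hem_list_find_mtt(); in direct mode only
 * pages[0] is recorded as the root BA.
 */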
750 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
751 		     dma_addr_t *pages, unsigned int page_cnt)
752 {
753 	struct ib_device *ibdev = &hr_dev->ib_dev;
754 	struct hns_roce_buf_region *r;
755 	unsigned int i, mapped_cnt;
756 	int ret = 0;
757 
758 	/*
759 	 * Only use the first page address as the root BA when hopnum is 0;
760 	 * this is because the addresses of all pages are consecutive in that case.
761 	 */
762 	if (mtr->hem_cfg.is_direct) {
763 		mtr->hem_cfg.root_ba = pages[0];
764 		return 0;
765 	}
766 
767 	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
768 	     mapped_cnt < page_cnt; i++) {
769 		r = &mtr->hem_cfg.region[i];
770 
771 		if (r->offset + r->count > page_cnt) {
772 			ret = -EINVAL;
773 			ibdev_err(ibdev,
774 				  "failed to check mtr%u count %u + %u > %u.\n",
775 				  i, r->offset, r->count, page_cnt);
776 			return ret;
777 		}
778 
779 		ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
780 				     page_cnt - mapped_cnt);
781 		if (ret < 0) {
782 			ibdev_err(ibdev,
783 				  "failed to map mtr%u offset %u, ret = %d.\n",
784 				  i, r->offset, ret);
785 			return ret;
786 		}
787 		mapped_cnt += ret;
788 		ret = 0;
789 	}
790 
791 	if (mapped_cnt < page_cnt) {
792 		ret = -ENOBUFS;
793 		ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
794 			  mapped_cnt, page_cnt);
795 	}
796 
797 	return ret;
798 }
799 
800 static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
801 					u32 start_index, u64 *mtt_buf,
802 					int mtt_cnt)
803 {
804 	int mtt_count;
805 	int total = 0;
806 	u32 npage;
807 	u64 addr;
808 
809 	if (mtt_cnt > cfg->region_count)
810 		return -EINVAL;
811 
812 	for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
813 	     mtt_count++) {
814 		npage = cfg->region[mtt_count].offset;
815 		if (npage < start_index)
816 			continue;
817 
818 		addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
819 		mtt_buf[total] = addr;
820 
821 		total++;
822 	}
823 
824 	if (!total)
825 		return -ENOENT;
826 
827 	return 0;
828 }
829 
830 static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
831 				 struct hns_roce_mtr *mtr, u32 start_index,
832 				 u64 *mtt_buf, int mtt_cnt)
833 {
834 	int left = mtt_cnt;
835 	int total = 0;
836 	int mtt_count;
837 	__le64 *mtts;
838 	u32 npage;
839 
840 	while (left > 0) {
841 		mtt_count = 0;
842 		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
843 						  start_index + total,
844 						  &mtt_count);
845 		if (!mtts || !mtt_count)
846 			break;
847 
848 		npage = min(mtt_count, left);
849 		left -= npage;
850 		for (mtt_count = 0; mtt_count < npage; mtt_count++)
851 			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
852 	}
853 
854 	if (!total)
855 		return -ENOENT;
856 
857 	return 0;
858 }
859 
860 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
861 		      u32 offset, u64 *mtt_buf, int mtt_max)
862 {
863 	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
864 	u32 start_index;
865 	int ret;
866 
867 	if (!mtt_buf || mtt_max < 1)
868 		return -EINVAL;
869 
870 	/* no mtt memory in direct mode, so just return the buffer address */
871 	if (cfg->is_direct) {
872 		start_index = offset >> HNS_HW_PAGE_SHIFT;
873 		ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
874 						   mtt_buf, mtt_max);
875 	} else {
876 		start_index = offset >> cfg->buf_pg_shift;
877 		ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
878 					    mtt_buf, mtt_max);
879 	}
880 	return ret;
881 }
882 
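/*
 * Worked example (hypothetical values): for a single 16K region with
 * page_shift 12, no unaligned offset and hopnum > 0, the non-direct branch
 * below yields buf_pg_count = 4 and buf_pg_shift = 12; with hopnum 0
 * (direct mode) the buffer counts as one page and buf_pg_shift becomes
 * HNS_HW_PAGE_SHIFT + order_base_2(DIV_ROUND_UP(16K, 4K)) = 14.
 */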
883 static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
884 			    struct hns_roce_buf_attr *attr,
885 			    struct hns_roce_hem_cfg *cfg,
886 			    unsigned int *buf_page_shift, u64 unaligned_size)
887 {
888 	struct hns_roce_buf_region *r;
889 	u64 first_region_padding;
890 	int page_cnt, region_cnt;
891 	unsigned int page_shift;
892 	size_t buf_size;
893 
894 	/* If mtt is disabled, all pages must be within a continuous range */
895 	cfg->is_direct = !mtr_has_mtt(attr);
896 	buf_size = mtr_bufs_size(attr);
897 	if (cfg->is_direct) {
898 		/* When the HEM buffer uses 0-level addressing, the page size
899 		 * equals the whole buffer size. The buffer is split into small
900 		 * pages only to check whether adjacent units lie in continuous
901 		 * space; the small-page size is fixed to 4K based on the hns
902 		 * ROCEE's requirement.
903 		 */
904 		page_shift = HNS_HW_PAGE_SHIFT;
905 
906 		/* The ROCEE requires the page size to be 4K * 2 ^ N. */
907 		cfg->buf_pg_count = 1;
908 		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
909 			order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
910 		first_region_padding = 0;
911 	} else {
912 		page_shift = attr->page_shift;
913 		cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unaligned_size,
914 						 1 << page_shift);
915 		cfg->buf_pg_shift = page_shift;
916 		first_region_padding = unaligned_size;
917 	}
918 
919 	/* Convert the buffer size to a page index and page count for each region;
920 	 * the buffer's offset needs to be added to the first region.
921 	 */
922 	for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
923 	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
924 		r = &cfg->region[region_cnt];
925 		r->offset = page_cnt;
926 		buf_size = hr_hw_page_align(attr->region[region_cnt].size +
927 					    first_region_padding);
928 		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
929 		first_region_padding = 0;
930 		page_cnt += r->count;
931 		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
932 					     r->count);
933 	}
934 
935 	cfg->region_count = region_cnt;
936 	*buf_page_shift = page_shift;
937 
938 	return page_cnt;
939 }
940 
941 static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
942 {
943 	return int_pow(ba_per_bt, hopnum - 1);
944 }
945 
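/*
 * Illustrative example: with a 4K BA page, one table holds
 * 4096 / BA_BYTE_LEN = 512 BAs, so a 2-hop region of 100000 buffer pages
 * needs DIV_ROUND_UP(100000, 512) = 196 level-1 BAs and still fits within a
 * single root table; cal_best_bt_pg_sz() below picks the smallest supported
 * page size, not below the requested shift, for which this holds across all
 * regions.
 */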
946 static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
947 				      struct hns_roce_mtr *mtr,
948 				      unsigned int pg_shift)
949 {
950 	unsigned long cap = hr_dev->caps.page_size_cap;
951 	struct hns_roce_buf_region *re;
952 	unsigned int pgs_per_l1ba;
953 	unsigned int ba_per_bt;
954 	unsigned int ba_num;
955 	int i;
956 
957 	for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
958 		if (!(BIT(pg_shift) & cap))
959 			continue;
960 
961 		ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
962 		ba_num = 0;
963 		for (i = 0; i < mtr->hem_cfg.region_count; i++) {
964 			re = &mtr->hem_cfg.region[i];
965 			if (re->hopnum == 0)
966 				continue;
967 
968 			pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
969 			ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
970 		}
971 
972 		if (ba_num <= ba_per_bt)
973 			return pg_shift;
974 	}
975 
976 	return 0;
977 }
978 
979 static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
980 			 unsigned int ba_page_shift)
981 {
982 	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
983 	int ret;
984 
985 	hns_roce_hem_list_init(&mtr->hem_list);
986 	if (!cfg->is_direct) {
987 		ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
988 		if (!ba_page_shift)
989 			return -ERANGE;
990 
991 		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
992 						cfg->region, cfg->region_count,
993 						ba_page_shift);
994 		if (ret)
995 			return ret;
996 		cfg->root_ba = mtr->hem_list.root_ba;
997 		cfg->ba_pg_shift = ba_page_shift;
998 	} else {
999 		cfg->ba_pg_shift = cfg->buf_pg_shift;
1000 	}
1001 
1002 	return 0;
1003 }
1004 
1005 static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
1006 {
1007 	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
1008 }
1009 
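/*
 * Illustrative call sequence, simplified from alloc_mr_pbl() above:
 *
 *	struct hns_roce_buf_attr buf_attr = {};
 *
 *	buf_attr.page_shift = hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
 *	buf_attr.region[0].size = mr->size;
 *	buf_attr.region[0].hopnum = hr_dev->caps.pbl_hop_num;
 *	buf_attr.region_count = 1;
 *	ret = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
 *				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
 *				  udata, start);
 */
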
1010 /**
1011  * hns_roce_mtr_create - Create hns memory translate region.
1012  *
1013  * @hr_dev: RoCE device struct pointer
1014  * @mtr: memory translate region
1015  * @buf_attr: buffer attribute for creating mtr
1016  * @ba_page_shift: page shift for multi-hop base address table
1017  * @udata: user space context, if it's NULL, means kernel space
1018  * @user_addr: userspace virtual address to start at
1019  */
1020 int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
1021 			struct hns_roce_buf_attr *buf_attr,
1022 			unsigned int ba_page_shift, struct ib_udata *udata,
1023 			unsigned long user_addr)
1024 {
1025 	struct ib_device *ibdev = &hr_dev->ib_dev;
1026 	unsigned int buf_page_shift = 0;
1027 	int buf_page_cnt;
1028 	int ret;
1029 
1030 	buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
1031 					&buf_page_shift,
1032 					udata ? user_addr & ~PAGE_MASK : 0);
1033 	if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
1034 		ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
1035 			  buf_page_cnt, buf_page_shift);
1036 		return -EINVAL;
1037 	}
1038 
1039 	ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
1040 	if (ret) {
1041 		ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
1042 		return ret;
1043 	}
1044 
1045 	/* The caller has its own buffer list and will invoke hns_roce_mtr_map()
1046 	 * later to finish the MTT configuration.
1047 	 */
1048 	if (buf_attr->mtt_only) {
1049 		mtr->umem = NULL;
1050 		mtr->kmem = NULL;
1051 		return 0;
1052 	}
1053 
1054 	ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
1055 	if (ret) {
1056 		ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
1057 		goto err_alloc_mtt;
1058 	}
1059 
1060 	/* Write buffer's dma address to MTT */
1061 	ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
1062 	if (ret)
1063 		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
1064 	else
1065 		return 0;
1066 
1067 	mtr_free_bufs(hr_dev, mtr);
1068 err_alloc_mtt:
1069 	mtr_free_mtt(hr_dev, mtr);
1070 	return ret;
1071 }
1072 
1073 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
1074 {
1075 	/* release multi-hop addressing resource */
1076 	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
1077 
1078 	/* free buffers */
1079 	mtr_free_bufs(hr_dev, mtr);
1080 }
1081