/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

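/*
 * Translate IB verbs access flags into the MLX4_PERM_* bits programmed
 * into the hardware MPT entry.  Local read access is always granted.
 */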
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
	       MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}

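/*
 * Register an MR that covers the whole address space (IOVA 0, length
 * ~0ull), e.g. for kernel consumers that post DMA addresses directly.
 */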
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

enum {
	MLX4_MAX_MTT_SHIFT = 31
};

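/*
 * Write one run of contiguous DMA pages into the MTT: expand the block
 * [cur_start_addr, cur_start_addr + len) into mtt_size-sized entries.
 * Entries are batched in the pages buffer and flushed to the device a
 * page's worth at a time via mlx4_write_mtt().
 */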
static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
					struct mlx4_mtt *mtt,
					u64 mtt_size, u64 mtt_shift, u64 len,
					u64 cur_start_addr, u64 *pages,
					int *start_index, int *npages)
{
	u64 cur_end_addr = cur_start_addr + len;
	u64 cur_end_addr_aligned = 0;
	u64 mtt_entries;
	int err = 0;
	int k;

	len += (cur_start_addr & (mtt_size - 1ULL));
	cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
	len += (cur_end_addr_aligned - cur_end_addr);
	if (len & (mtt_size - 1ULL)) {
		pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
			len, mtt_size);
		return -EINVAL;
	}

	mtt_entries = (len >> mtt_shift);

	/*
	 * Align the MTT start address to the mtt_size.
	 * Required to handle cases when the MR starts in the middle of an MTT
	 * record.  This was not required by the old code, since the physical
	 * addresses provided by the DMA subsystem were page aligned and the
	 * page size was also the MTT size.
	 */
	cur_start_addr = round_down(cur_start_addr, mtt_size);
	/* Emit one MTT entry per mtt_size chunk of the block. */
	for (k = 0; k < mtt_entries; ++k) {
		pages[*npages] = cur_start_addr + (mtt_size * k);
		(*npages)++;
		/*
		 * Be friendly to mlx4_write_mtt() and pass it chunks of
		 * appropriate size.
		 */
		if (*npages == PAGE_SIZE / sizeof(u64)) {
			err = mlx4_write_mtt(dev->dev, mtt, *start_index,
					     *npages, pages);
			if (err)
				return err;

			(*start_index) += *npages;
			*npages = 0;
		}
	}

	return 0;
}

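/*
 * Return the position of the lowest set bit of "ptr", i.e. the largest
 * power-of-two alignment of the value: alignment_of(0x3000) == 12.
 */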
static inline u64 alignment_of(u64 ptr)
{
	return ilog2(ptr & (~(ptr - 1)));
}

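/*
 * Shrink block_shift until it describes an alignment that both the start
 * of the next block and the end of the current block satisfy.
 */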
static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
				       u64 current_block_end,
				       u64 block_shift)
{
	/*
	 * Check whether the new block is aligned as well as the previous
	 * one was: a block address must have zeros in its low block_shift
	 * bits.
	 */
	if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
		/*
		 * It is not as well aligned as the previous block; reduce
		 * the MTT size accordingly, taking the position of the
		 * lowest set bit.
		 */
		block_shift = alignment_of(next_block_start);

	/*
	 * Check whether the end of the previous block is aligned as well
	 * as its start.
	 */
	if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
		/*
		 * It is not as well aligned as the start of the block;
		 * reduce the MTT size accordingly.
		 */
		block_shift = alignment_of(current_block_end);

	return block_shift;
}

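/*
 * Walk the umem scatterlist, merge entries that are physically
 * contiguous into blocks, and write an MTT entry for every mtt_size
 * chunk of every block.
 */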
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	u64 len = 0;
	int err = 0;
	u64 mtt_size;
	u64 cur_start_addr = 0;
	u64 mtt_shift;
	int start_index = 0;
	int npages = 0;
	struct scatterlist *sg;
	int i;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	mtt_shift = mtt->page_shift;
	mtt_size = 1ULL << mtt_shift;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		if (cur_start_addr + len == sg_dma_address(sg)) {
			/* still the same block */
			len += sg_dma_len(sg);
			continue;
		}
		/*
		 * A new block is started ...
		 * If len is misaligned, write an extra mtt entry to cover the
		 * misaligned area (round up the division)
		 */
		err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
						   mtt_shift, len,
						   cur_start_addr,
						   pages, &start_index,
						   &npages);
		if (err)
			goto out;

		cur_start_addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	/* Handle the last block */
	if (len > 0) {
		/*
		 * If len is misaligned, write an extra mtt entry to cover
		 * the misaligned area (round up the division)
		 */
		err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
						   mtt_shift, len,
						   cur_start_addr, pages,
						   &start_index, &npages);
		if (err)
			goto out;
	}

	if (npages)
		err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

/*
 * Calculate the optimal MTT size based on contiguous pages.
 * The function also returns, via *num_of_mtts, the number of MTT entries
 * needed, including pages that are not aligned to the calculated
 * mtt_size.  For that, the lengths of the first and last chunks are
 * checked, and any part not aligned to mtt_size is counted as extra
 * entries.  All chunks in the middle are already handled as part of the
 * MTT shift calculation, for both their start and end addresses.
 */
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
				       int *num_of_mtts)
{
	u64 block_shift = MLX4_MAX_MTT_SHIFT;
	u64 min_shift = umem->page_shift;
	u64 last_block_aligned_end = 0;
	u64 current_block_start = 0;
	u64 first_block_start = 0;
	u64 current_block_len = 0;
	u64 last_block_end = 0;
	struct scatterlist *sg;
	u64 current_block_end;
	u64 misalignment_bits;
	u64 next_block_start;
	u64 total_len = 0;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/*
		 * Initialization: save the first chunk start as
		 * current_block_start; a block means contiguous pages.
		 */
		if (current_block_len == 0 && current_block_start == 0) {
			current_block_start = sg_dma_address(sg);
			first_block_start = current_block_start;
			/*
			 * Find the bits that are different between the
			 * physical address and the virtual address for the
			 * start of the MR.
			 * umem_get aligned the start_va to a page boundary.
			 * Therefore, we need to align the start va to the
			 * same boundary.
			 * misalignment_bits is needed to handle the case of
			 * a single memory region.  In this case, the rest of
			 * the logic will not reduce the block size.  If we
			 * used a block size which is bigger than the
			 * alignment of the misalignment bits, we might use
			 * the virtual page number instead of the physical
			 * page number, resulting in access to the wrong data.
			 */
			misalignment_bits =
			(start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
			^ current_block_start;
			block_shift = min(alignment_of(misalignment_bits),
					  block_shift);
		}

		/*
		 * Go over the scatter entries and check if they continue the
		 * previous scatter entry.
		 */
		next_block_start = sg_dma_address(sg);
		current_block_end = current_block_start + current_block_len;
		/* If we have a split (non-contig.) between two blocks */
		if (current_block_end != next_block_start) {
			block_shift = mlx4_ib_umem_calc_block_mtt
					(next_block_start,
					 current_block_end,
					 block_shift);

			/*
			 * If we reached the minimum shift for a 4K page,
			 * stop the loop.
			 */
			if (block_shift <= min_shift)
				goto end;

			/*
			 * Add the current block's length to the total before
			 * starting a new block; the first block's length is
			 * needed to calculate the non-aligned pages at the
			 * end.
			 */
			total_len += current_block_len;

			/* Start a new block */
			current_block_start = next_block_start;
			current_block_len = sg_dma_len(sg);
			continue;
		}
		/*
		 * The scatter entry is another part of the current block;
		 * increase the block size.
		 * A scatterlist entry can be larger than a 4K page because
		 * the DMA mapping may have merged some blocks together.
		 */
		current_block_len += sg_dma_len(sg);
	}

	/* Account for the last block in the total len */
	total_len += current_block_len;
	/* Add to the first block the misalignment that it suffers from. */
	total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
	last_block_end = current_block_start + current_block_len;
	last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
	total_len += (last_block_aligned_end - last_block_end);

	if (total_len & ((1ULL << block_shift) - 1ULL))
		pr_warn("misaligned total length detected (%llu, %llu)!\n",
			total_len, block_shift);

	*num_of_mtts = total_len >> block_shift;
end:
	if (block_shift < min_shift) {
		/*
		 * If the shift is less than the minimum, warn and return
		 * the minimum shift.
		 */
		pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n",
			block_shift);

		block_shift = min_shift;
	}
	return block_shift;
}

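/*
 * Pin the user memory for an MR.  See the comment below on why read-only
 * requests may still be registered as writable.
 */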
static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
					u64 length, u64 virt_addr,
					int access_flags)
{
	/*
	 * Force registering the memory as writable if the underlying pages
	 * are writable.  This is so rereg can change the access permissions
	 * from readable to writable without having to run through
	 * ib_umem_get again.
	 */
	if (!ib_access_writable(access_flags)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		/*
		 * FIXME: Ideally this would iterate over all the vmas that
		 * cover the memory, but for now it requires a single vma to
		 * entirely cover the MR to support RO mappings.
		 */
		vma = find_vma(current->mm, start);
		if (vma && vma->vm_end >= start + length &&
		    vma->vm_start <= start) {
			if (vma->vm_flags & VM_WRITE)
				access_flags |= IB_ACCESS_LOCAL_WRITE;
		} else {
			access_flags |= IB_ACCESS_LOCAL_WRITE;
		}

		up_read(&current->mm->mmap_sem);
	}

	return ib_umem_get(udata, start, length, access_flags, 0);
}

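/*
 * Register a user memory region: pin the pages, pick the largest MTT
 * page size the layout allows, allocate the MPT, and write the MTTs.
 */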
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem =
		mlx4_get_umem_mr(udata, start, length, virt_addr, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.length = length;
	mr->ibmr.iova = virt_addr;
	mr->ibmr.page_size = 1U << shift;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

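/*
 * Modify an existing MR in place.  Depending on the flags this changes
 * the PD, the access rights, and/or the translation (a new umem), all
 * through the MPT entry obtained from mlx4_mr_hw_get_mpt().
 */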
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
	 * we assume that the calls can't run concurrently.  Otherwise, a
	 * race exists.
	 */
	err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
					   to_mpd(pd)->pdn);
		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		if (ib_access_writable(mr_access_flags) &&
		    !mmr->umem->writable) {
			err = -EPERM;
			goto release_mpt_entry;
		}

		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));
		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_TRANS) {
		int shift;
		int n;

		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
		mmr->umem = mlx4_get_umem_mr(udata, start, length, virt_addr,
					     mr_access_flags);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		n = ib_umem_page_count(mmr->umem);
		shift = mmr->umem->page_shift;

		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
					      virt_addr, length, n, shift,
					      *pmpt_entry);
		if (err) {
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
		mmr->mmr.iova       = virt_addr;
		mmr->mmr.size       = length;

		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
		if (err) {
			mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
	}

	/* If we couldn't transfer the MR to the HCA, just remember to
	 * return a failure.  But dereg_mr will free the resources.
	 */
	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
	if (!err && flags & IB_MR_REREG_ACCESS)
		mmr->mmr.access = mr_access_flags;

release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

	return err;
}

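/*
 * Allocate the driver-private page list used by fast-register work
 * requests and map it for DMA.  A full zeroed page is used so the list
 * can never cross a page boundary.
 */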
static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int ret;

	/*
	 * Ensure that the size is aligned to DMA cacheline requirements.
	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES, so
	 * page_map_size will never cross PAGE_SIZE.
	 */
	mr->page_map_size = roundup(max_pages * sizeof(u64),
				    MLX4_MR_PAGES_ALIGN);

	/* Prevent cross page boundary allocation. */
	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!mr->pages)
		return -ENOMEM;

	mr->page_map = dma_map_single(device->dev.parent, mr->pages,
				      mr->page_map_size, DMA_TO_DEVICE);

	if (dma_mapping_error(device->dev.parent, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	free_page((unsigned long)mr->pages);
	return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
	if (mr->pages) {
		struct ib_device *device = mr->ibmr.device;

		dma_unmap_single(device->dev.parent, mr->page_map,
				 mr->page_map_size, DMA_TO_DEVICE);
		free_page((unsigned long)mr->pages);
		mr->pages = NULL;
	}
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	mlx4_free_priv_pages(mr);

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

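/*
 * Allocate and enable a memory window of the requested type.
 */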
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;

	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;

	mw->ibmw.rkey = mw->mmw.key;

	return &mw->ibmw;

err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
	kfree(mw);

	return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);

	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);

	return 0;
}

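/*
 * Allocate a fast-registration MR with room for up to max_num_sg pages.
 * Only IB_MR_TYPE_MEM_REG is supported, capped at
 * MLX4_MAX_FAST_REG_PAGES.
 */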
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;

	mr->max_pages = max_num_sg;
	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free_pl:
	mr->ibmr.device = pd->device;
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

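/*
 * Allocate a fast memory region (FMR) with the given page size and
 * mapping limits.
 */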
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof(*fmr), GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

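/*
 * Unmap a list of FMRs.  All FMRs on the list must belong to the same
 * device; after every MPT is marked unmapped, a single SYNC_TPT firmware
 * command makes the updates visible to the HCA.
 */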
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

	return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
	if (!err)
		kfree(ifmr);

	return err;
}

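/*
 * mlx4_set_page() is the per-page callback used by ib_sg_to_pages() in
 * mlx4_ib_map_mr_sg() below; it appends one page address (tagged
 * present) to the MR's private page list.
 */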
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

	return 0;
}

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int rc;

	mr->npages = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
				   mr->page_map_size, DMA_TO_DEVICE);

	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
				      mr->page_map_size, DMA_TO_DEVICE);

	return rc;
}
825