--- mr.c (0ea8a56de21be24cb79abb03dee79aabcd60a316)
+++ mr.c (d18bb3e15201918b8d07e85a6e010ca5ed28dad5)
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
--- 257 unchanged lines hidden (view full) ---
 	u64 last_block_end = 0;
 	struct scatterlist *sg;
 	u64 current_block_end;
 	u64 misalignment_bits;
 	u64 next_block_start;
 	u64 total_len = 0;
 	int i;
 
+	*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
+
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
 		/*
 		 * Initialization - save the first chunk start as the
 		 * current_block_start - block means contiguous pages.
 		 */
 		if (current_block_len == 0 && current_block_start == 0) {
 			current_block_start = sg_dma_address(sg);
 			first_block_start = current_block_start;
--- 134 unchanged lines hidden (view full) ---
 		return ERR_PTR(-ENOMEM);
 
 	mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err_free;
 	}
 
-	n = ib_umem_page_count(mr->umem);
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
 
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
 			    convert_access(access_flags), n, shift, &mr->mmr);
 	if (err)
 		goto err_umem;
 
 	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
--- 73 unchanged lines hidden (view full) ---
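The two hunks above belong to the conversion away from ib_umem_page_count(): mlx4_ib_umem_calc_optimal_mtt_size() now seeds *num_of_mtts itself via ib_umem_num_dma_blocks() (first hunk), which makes the caller's pre-seed with ib_umem_page_count() redundant, so the second hunk drops it. ib_umem_num_dma_blocks() counts pgsz-sized blocks over the pgsz-aligned IOVA span of the mapping rather than counting mapped scatterlist pages. A minimal userspace sketch of that arithmetic, assuming a power-of-two block size; the helper names and example values are illustrative, not from the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the ib_umem_num_dma_blocks() arithmetic: count pgsz-sized
	 * blocks over the pgsz-aligned span [iova, iova + length).  Assumes
	 * pgsz is a power of two, as the kernel helper does. */
	static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }
	static uint64_t align_up(uint64_t x, uint64_t a)   { return align_down(x + a - 1, a); }

	static uint64_t num_dma_blocks(uint64_t iova, uint64_t length, uint64_t pgsz)
	{
		return (align_up(iova + length, pgsz) - align_down(iova, pgsz)) / pgsz;
	}

	int main(void)
	{
		/* 10 KiB starting 1 KiB into a 4 KiB page covers three blocks. */
		printf("%llu\n",
		       (unsigned long long)num_dma_blocks(0x1400, 10 * 1024, 4096));
		return 0;
	}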
 	mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
 				     mr_access_flags);
 	if (IS_ERR(mmr->umem)) {
 		err = PTR_ERR(mmr->umem);
 		/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
 		mmr->umem = NULL;
 		goto release_mpt_entry;
 	}
-	n = ib_umem_page_count(mmr->umem);
+	n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
 	shift = PAGE_SHIFT;
 
 	err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 				      virt_addr, length, n, shift,
 				      *pmpt_entry);
 	if (err) {
 		ib_umem_release(mmr->umem);
 		goto release_mpt_entry;
--- 82 unchanged lines hidden (view full) ---
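The rereg path just shown takes the simple route: count PAGE_SIZE blocks and pin shift to PAGE_SHIFT, whereas initial registration asks mlx4_ib_umem_calc_optimal_mtt_size() for the largest block size the DMA layout allows. Judging from the variables visible in the first hunk (first_block_start, last_block_end, misalignment_bits), the optimal size is bounded by the alignment of the interior block boundaries. The sketch below shows only that boundary-alignment test, with names and inputs of my own invention; the real driver additionally merges physically contiguous chunks into blocks before testing, which this sketch skips:

	#include <stdint.h>
	#include <stdio.h>

	struct chunk { uint64_t dma_addr, len; };	/* stand-in for a DMA sg entry */

	/* Illustrative only: every interior block boundary must be aligned to
	 * the chosen block size, so OR the boundary addresses together and
	 * grow the shift while the low bits stay clear. */
	static int optimal_shift(const struct chunk *c, int n, int page_shift)
	{
		uint64_t misalignment_bits = 0;
		int i, shift = page_shift;

		for (i = 1; i < n; i++)			/* interior block starts */
			misalignment_bits |= c[i].dma_addr;
		for (i = 0; i < n - 1; i++)		/* interior block ends */
			misalignment_bits |= c[i].dma_addr + c[i].len;

		while (misalignment_bits &&
		       !(misalignment_bits & ((1ULL << (shift + 1)) - 1)))
			shift++;
		return shift;
	}

	int main(void)
	{
		/* Two non-contiguous, 64 KiB-aligned chunks: shift 16 still fits. */
		struct chunk c[] = { { 0x100000, 0x10000 }, { 0x200000, 0x10000 } };
		printf("shift=%d\n", optimal_shift(c, 2, 12));
		return 0;
	}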
 		return ret;
 	if (mr->umem)
 		ib_umem_release(mr->umem);
 	kfree(mr);
 
 	return 0;
 }
 
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			       struct ib_udata *udata)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
-	struct mlx4_ib_dev *dev = to_mdev(pd->device);
-	struct mlx4_ib_mw *mw;
+	struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+	struct mlx4_ib_mw *mw = to_mmw(ibmw);
 	int err;
 
-	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
-	if (!mw)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
-			    to_mlx4_type(type), &mw->mmw);
+	err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+			    to_mlx4_type(ibmw->type), &mw->mmw);
 	if (err)
-		goto err_free;
+		return err;
 
 	err = mlx4_mw_enable(dev->dev, &mw->mmw);
 	if (err)
 		goto err_mw;
 
-	mw->ibmw.rkey = mw->mmw.key;
+	ibmw->rkey = mw->mmw.key;
+	return 0;
 
-	return &mw->ibmw;
-
 err_mw:
 	mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
-	kfree(mw);
-
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
 {
 	struct mlx4_ib_mw *mw = to_mmw(ibmw);
 
 	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
-	kfree(mw);
-
 	return 0;
 }
 
 struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 			       u32 max_num_sg)
 {
 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	struct mlx4_ib_mr *mr;
--- 69 unchanged lines hidden ---
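The mlx4_ib_alloc_mw()/mlx4_ib_dealloc_mw() rewrite above follows the pattern where ib_core, not the driver, owns the ib_mw allocation: the core allocates one buffer sized for the driver's container struct (presumably declared elsewhere in the driver's ops, which this diff does not show), hands the embedded struct ib_mw to alloc_mw(), and frees the object itself after dealloc_mw(). That is why the kmalloc()/kfree()/ERR_PTR() plumbing disappears and the callback shrinks to an int-returning initializer, with to_mmw() recovering the container via container_of(). A self-contained sketch of that layout trick, with the structures trimmed to stand-ins:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Trimmed stand-ins for the real structures. */
	struct ib_mw { unsigned int rkey; };
	struct mlx4_ib_mw {
		struct ib_mw ibmw;	/* embedded core object */
		unsigned int mmw_key;	/* stand-in for struct mlx4_mw mmw */
	};

	/* container_of() reduced to pointer arithmetic for this sketch. */
	#define to_mmw(p) \
		((struct mlx4_ib_mw *)((char *)(p) - offsetof(struct mlx4_ib_mw, ibmw)))

	int main(void)
	{
		/* Core side: one allocation sized for the driver container. */
		struct mlx4_ib_mw *mw = calloc(1, sizeof(*mw));
		struct ib_mw *ibmw;

		if (!mw)
			return 1;
		ibmw = &mw->ibmw;

		/* Driver side: recover the container and initialize it. */
		to_mmw(ibmw)->mmw_key = 0x1234;
		ibmw->rkey = to_mmw(ibmw)->mmw_key;
		printf("rkey=0x%x\n", ibmw->rkey);

		free(mw);	/* core, not the driver, frees the object */
		return 0;
	}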