--- mr.c (0ea8a56de21be24cb79abb03dee79aabcd60a316)
+++ mr.c (d18bb3e15201918b8d07e85a6e010ca5ed28dad5)
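This hunk converts mlx5_ib_alloc_mw() from the driver-allocated model to the ib_core-owned one: rather than kzalloc'ing its own struct mlx5_ib_mw and handing back a struct ib_mw * (or an ERR_PTR), the driver now receives a pre-allocated struct ib_mw, resolves its private wrapper with to_mmw(), and returns a plain errno. mlx5_ib_dealloc_mw() shrinks to match, since freeing the object is no longer the driver's job.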
 /*
  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
  * OpenIB.org BSD license below:

--- 1959 unchanged lines hidden ---

 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
                                          u32 max_num_sg, u32 max_num_meta_sg)
 {
         return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
                                   max_num_meta_sg);
 }

-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-                               struct ib_udata *udata)
+int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
-        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+        struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
         int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-        struct mlx5_ib_mw *mw = NULL;
+        struct mlx5_ib_mw *mw = to_mmw(ibmw);
         u32 *in = NULL;
         void *mkc;
         int ndescs;
         int err;
         struct mlx5_ib_alloc_mw req = {};
         struct {
                 __u32 comp_mask;
                 __u32 response_length;
         } resp = {};

         err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
         if (err)
-                return ERR_PTR(err);
+                return err;

         if (req.comp_mask || req.reserved1 || req.reserved2)
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;

         if (udata->inlen > sizeof(req) &&
             !ib_is_udata_cleared(udata, sizeof(req),
                                  udata->inlen - sizeof(req)))
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;

         ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

-        mw = kzalloc(sizeof(*mw), GFP_KERNEL);
         in = kzalloc(inlen, GFP_KERNEL);
-        if (!mw || !in) {
+        if (!in) {
                 err = -ENOMEM;
                 goto free;
         }

         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

         MLX5_SET(mkc, mkc, free, 1);
         MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
-        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+        MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
         MLX5_SET(mkc, mkc, umr_en, 1);
         MLX5_SET(mkc, mkc, lr, 1);
         MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
-        MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+        MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
         MLX5_SET(mkc, mkc, qpn, 0xffffff);

         err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
         if (err)
                 goto free;

         mw->mmkey.type = MLX5_MKEY_MW;
-        mw->ibmw.rkey = mw->mmkey.key;
+        ibmw->rkey = mw->mmkey.key;
         mw->ndescs = ndescs;

-        resp.response_length = min(offsetof(typeof(resp), response_length) +
-                                   sizeof(resp.response_length), udata->outlen);
+        resp.response_length =
+                min(offsetofend(typeof(resp), response_length), udata->outlen);
         if (resp.response_length) {
                 err = ib_copy_to_udata(udata, &resp, resp.response_length);
-                if (err) {
-                        mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
-                        goto free;
-                }
+                if (err)
+                        goto free_mkey;
         }

         if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
                 err = xa_err(xa_store(&dev->odp_mkeys,
                                       mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
                                       GFP_KERNEL));
                 if (err)
                         goto free_mkey;
         }

         kfree(in);
-        return &mw->ibmw;
+        return 0;

 free_mkey:
         mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
 free:
-        kfree(mw);
         kfree(in);
-        return ERR_PTR(err);
+        return err;
 }
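With ib_core owning the container, to_mmw() is just a container_of() over the embedded struct ib_mw, which is why the kzalloc/kfree pair and the NULL-initialized mw disappear above, and why every early exit can return a bare errno instead of an ERR_PTR. The resp.response_length change is purely cosmetic: offsetofend(T, m) expands to offsetof(T, m) plus the member's size, exactly the open-coded sum it replaces. Below is a minimal sketch of the embedding this relies on, reconstructed to match the mlx5_ib.h definitions of this era (reproduced from memory, so treat the field list as illustrative):

/* Sketch: the driver wrapper embeds the core-owned object. */
struct mlx5_ib_mw {
        struct ib_mw            ibmw;   /* allocated and freed by ib_core */
        struct mlx5_core_mkey   mmkey;  /* mlx5 mkey backing the window */
        int                     ndescs;
};

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
        /* Plain container_of(); valid because ibmw is embedded above. */
        return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}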

 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
 {
         struct mlx5_ib_dev *dev = to_mdev(mw->device);
         struct mlx5_ib_mw *mmw = to_mmw(mw);
-        int err;

         if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
                 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
                 /*
                  * pagefault_single_data_segment() may be accessing mmw under
                  * SRCU if the user bound an ODP MR to this MW.
                  */
                 synchronize_srcu(&dev->odp_srcu);
         }

-        err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
-        if (err)
-                return err;
-        kfree(mmw);
-        return 0;
+        return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
 }
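On teardown the driver now only destroys hardware state; the memory behind both structs belongs to ib_core, which frees it once dealloc_mw() succeeds. Roughly, the core-side contract this diff codes against looks like the sketch below; it is paraphrased, not quoted, from the ib_core of the same era (mw_type is a placeholder name, and error handling is trimmed):

/*
 * Allocation: one core-side allocation, sized for the driver wrapper.
 * The driver advertises that size from its ops table, e.g. with
 * INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw).
 */
struct ib_mw *mw = rdma_zalloc_drv_obj(pd->device, ib_mw);
if (!mw)
        return ERR_PTR(-ENOMEM);

mw->device = pd->device;
mw->pd = pd;
mw->type = mw_type;                        /* IB_MW_TYPE_1 or IB_MW_TYPE_2 */

ret = pd->device->ops.alloc_mw(mw, udata); /* driver fills in rkey etc. */
if (ret) {
        kfree(mw);                         /* core, not driver, owns the memory */
        return ERR_PTR(ret);
}

/* Teardown: driver destroys its mkey, then the core frees the container. */
ret = mw->device->ops.dealloc_mw(mw);
if (!ret)
        kfree(mw);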

 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                             struct ib_mr_status *mr_status)
 {
         struct mlx5_ib_mr *mmr = to_mmr(ibmr);
         int ret = 0;

--- 343 unchanged lines hidden ---