// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
 */

#include "rxe.h"

int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_get(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_put(pd);
		return ret;
	}

	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	return 0;
}

static void rxe_do_dealloc_mw(struct rxe_mw *mw)
{
	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_put(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_put(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}

int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);

	spin_lock_bh(&mw->lock);
	rxe_do_dealloc_mw(mw);
	spin_unlock_bh(&mw->lock);

	rxe_put(mw);
	rxe_put(pd);

	return 0;
}
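
/*
 * Note on the rkey layout, derived from rxe_alloc_mw() above: an MW rkey
 * packs the pool index into the upper 24 bits and an 8-bit variant key
 * into the low byte, i.e. rkey = (elem.index << 8) | key. Consumers
 * recover the pool index with (rkey >> 8) and compare the low byte
 * separately, which is how a bind can install a new low byte without
 * moving the MW in the pool. A worked example (hypothetical values):
 *
 *	index = 0x001234, key = 0xab  =>  rkey = 0x1234ab
 *	rkey >> 8   = 0x001234	(pool index for rxe_pool_get_index())
 *	rkey & 0xff = 0xab	(variant checked against mw->rkey & 0xff)
 */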

static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			pr_err_once(
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		/* o10-36.2.2 */
		if (unlikely((mw->access & IB_ZERO_BASED))) {
			pr_err_once("attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		/* o10-37.2.30 */
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			pr_err_once(
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		/* C10-72 */
		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			pr_err_once(
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		/* o10-37.2.40 */
		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			pr_err_once(
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	if (unlikely(key == (mw->rkey & 0xff))) {
		pr_err_once("attempt to bind MW with same key\n");
		return -EINVAL;
	}

	/* remaining checks only apply to a non-NULL MR */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		pr_err_once("attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	/* C10-73 */
	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		pr_err_once(
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	/* C10-74 */
	if (unlikely((mw->access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		pr_err_once(
			"attempt to bind a writable MW to an MR without local write access\n");
		return -EINVAL;
	}

	/* C10-75 */
	if (mw->access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
			pr_err_once(
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->cur_map_set->iova +
			       mr->cur_map_set->length)))) {
			pr_err_once(
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	mw->rkey = (mw->rkey & ~0xff) | key;
	mw->access = wqe->wr.wr.mw.access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	if (mw->mr) {
		struct rxe_mr *old_mr = mw->mr;

		/* clear the binding and drop the counter before dropping
		 * the reference, so the MR is not touched after a
		 * potential final put
		 */
		mw->mr = NULL;
		atomic_dec(&old_mr->num_mw);
		rxe_put(old_mr);
	}

	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_get(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_get(qp);
		mw->qp = qp;
	}
}

int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int ret;
	struct rxe_mw *mw;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;

	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
	if (unlikely(!mw)) {
		ret = -EINVAL;
		goto err;
	}

	if (unlikely(mw->rkey != mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}

		if (unlikely(mr->lkey != mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	} else {
		mr = NULL;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
	if (ret)
		goto err_unlock;

	rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_mr:
	if (mr)
		rxe_put(mr);
err_drop_mw:
	rxe_put(mw);
err:
	return ret;
}

static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	/* o10-37.2.26 */
	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_put(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);
	rxe_put(mr);

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	if (rkey != mw->rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_ref:
	rxe_put(mw);
err:
	return ret;
}
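
/*
 * rxe_lookup_mw() below looks up the MW an incoming rkey refers to and
 * validates it for use: the rkey must match exactly, the MW must belong
 * to the requester's PD, a type 2 MW must be bound to this QP, and the
 * MW must be valid with a nonzero length and the requested access bits.
 * On success the MW carries the pool reference taken by
 * rxe_pool_get_index(); the caller is expected to drop it with
 * rxe_put() when done, as the failure path inside the function does.
 */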

struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) ||
		     (access && !(access & mw->access)) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_put(mw);
		return NULL;
	}

	return mw;
}
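
/*
 * Reference-counting summary (as implemented above, not normative): a
 * bound MW holds one reference on its MR and bumps mr->num_mw; a valid
 * type 2 MW additionally holds one reference on the QP it was bound on.
 * rxe_do_dealloc_mw() and rxe_do_invalidate_mw() release both under
 * mw->lock, returning the MW to the INVALID and FREE states respectively.
 */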