// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
 */

#include "rxe.h"

int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_add_ref(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_drop_ref(pd);
		return ret;
	}

	rxe_add_index(mw);
	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	return 0;
}

static void rxe_do_dealloc_mw(struct rxe_mw *mw)
{
	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_drop_ref(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_drop_ref(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}

int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);

	spin_lock_bh(&mw->lock);
	rxe_do_dealloc_mw(mw);
	spin_unlock_bh(&mw->lock);

	rxe_drop_ref(mw);
	rxe_drop_ref(pd);

	return 0;
}

static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			pr_err_once(
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		/* o10-36.2.2 */
		if (unlikely((mw->access & IB_ZERO_BASED))) {
			pr_err_once("attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		/* o10-37.2.30 */
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			pr_err_once(
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		/* C10-72 */
		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			pr_err_once(
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		/* o10-37.2.40 */
		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			pr_err_once(
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	if (unlikely(key == (mw->rkey & 0xff))) {
		pr_err_once("attempt to bind MW with same key\n");
		return -EINVAL;
	}

	/* remaining checks only apply to a nonzero MR */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		pr_err_once("attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	/* C10-73 */
	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		pr_err_once(
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	/* C10-74 */
	if (unlikely((mw->access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		pr_err_once(
			"attempt to bind a writeable MW to an MR without local write access\n");
		return -EINVAL;
	}

	/* C10-75 */
	if (mw->access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
			pr_err_once(
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->cur_map_set->iova + mr->cur_map_set->length)))) {
			pr_err_once(
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	mw->rkey = (mw->rkey & ~0xff) | key;
	mw->access = wqe->wr.wr.mw.access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	/* release any MR previously bound to this MW */
	if (mw->mr) {
		atomic_dec(&mw->mr->num_mw);
		rxe_drop_ref(mw->mr);
		mw->mr = NULL;
	}

	/* a nonzero length bind attaches the new MR to the MW */
	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_add_ref(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_add_ref(qp);
		mw->qp = qp;
	}
}

int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int ret;
	struct rxe_mw *mw;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;

	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
	if (unlikely(!mw)) {
		ret = -EINVAL;
		goto err;
	}

	if (unlikely(mw->rkey != mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}

		if (unlikely(mr->lkey != mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	} else {
		/* zero length bind has no MR to look up */
		mr = NULL;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
	if (ret)
		goto err_unlock;

	rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_mr:
	if (mr)
		rxe_drop_ref(mr);
err_drop_mw:
	rxe_drop_ref(mw);
err:
	return ret;
}

static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	/* o10-37.2.26 */
	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_drop_ref(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);
	rxe_drop_ref(mr);

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	if (rkey != mw->rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_ref:
	rxe_drop_ref(mw);
err:
	return ret;
}

struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) ||
		     (access && !(access & mw->access)) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_drop_ref(mw);
		return NULL;
	}

	return mw;
}

void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);

	rxe_drop_index(mw);
}