// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
 */

/*
 * The rdma_rxe driver supports type 1 or type 2B memory windows.
 * Type 1 MWs are created by ibv_alloc_mw() verbs calls and bound by
 * ibv_bind_mw() calls. Type 2 MWs are also created by ibv_alloc_mw()
 * but bound by bind_mw work requests. The ibv_bind_mw() call is converted
 * by libibverbs to a bind_mw work request.
 */

#include "rxe.h"

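/*
 * For reference, a rough userspace sketch of the two bind paths described
 * above (illustrative only, not part of this driver; it assumes a PD 'pd',
 * a connected QP 'qp', a buffer 'buf' of 'len' bytes, and an MR 'mr'
 * registered with IBV_ACCESS_MW_BIND):
 *
 *	// Type 1: allocate, then bind with the ibv_bind_mw() verb
 *	struct ibv_mw *mw1 = ibv_alloc_mw(pd, IBV_MW_TYPE_1);
 *	struct ibv_mw_bind bind = {
 *		.wr_id = 1,
 *		.send_flags = IBV_SEND_SIGNALED,
 *		.bind_info = {
 *			.mr = mr,
 *			.addr = (uintptr_t)buf,
 *			.length = len,
 *			.mw_access_flags = IBV_ACCESS_REMOTE_WRITE,
 *		},
 *	};
 *	ibv_bind_mw(qp, mw1, &bind);	// mw1->rkey is updated on success
 *
 *	// Type 2: allocate, then post a bind_mw work request
 *	struct ibv_mw *mw2 = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
 *	struct ibv_send_wr wr = {
 *		.wr_id = 2,
 *		.opcode = IBV_WR_BIND_MW,
 *		.send_flags = IBV_SEND_SIGNALED,
 *		.bind_mw = {
 *			.mw = mw2,
 *			.rkey = ibv_inc_rkey(mw2->rkey),
 *			.bind_info = bind.bind_info,
 *		},
 *	};
 *	struct ibv_send_wr *bad_wr;
 *	ibv_post_send(qp, &wr, &bad_wr);
 *
 * Either path reaches rxe_bind_mw() below as a bind_mw work request.
 */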

int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_get(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_put(pd);
		return ret;
	}

	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	rxe_finalize(mw);

	return 0;
}

int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);

	rxe_cleanup(mw);

	return 0;
}

static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_mw *mw, struct rxe_mr *mr)
{
	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			rxe_dbg_mw(mw,
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		/* o10-36.2.2 */
		if (unlikely((mw->access & IB_ZERO_BASED))) {
			rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		/* o10-37.2.30 */
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			rxe_dbg_mw(mw,
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		/* C10-72 */
		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			rxe_dbg_mw(mw,
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		/* o10-37.2.40 */
		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			rxe_dbg_mw(mw,
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	/* remaining checks only apply to a nonzero MR */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	/* C10-73 */
	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		rxe_dbg_mw(mw,
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	/* C10-74 */
	if (unlikely((mw->access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		rxe_dbg_mw(mw,
			"attempt to bind a writable MW to an MR without local write access\n");
		return -EINVAL;
	}

	/* C10-75 */
	if (mw->access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
			rxe_dbg_mw(mw,
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->ibmr.iova + mr->ibmr.length)))) {
			rxe_dbg_mw(mw,
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	mw->rkey = (mw->rkey & ~0xff) | key;
	mw->access = wqe->wr.wr.mw.access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	if (mw->mr) {
		rxe_put(mw->mr);
		atomic_dec(&mw->mr->num_mw);
		mw->mr = NULL;
	}

	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_get(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_get(qp);
		mw->qp = qp;
	}
}

int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int ret;
	struct rxe_mw *mw;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;

	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
	if (unlikely(!mw)) {
		ret = -EINVAL;
		goto err;
	}

	if (unlikely(mw->rkey != mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}

		if (unlikely(mr->lkey != mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	} else {
		mr = NULL;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
	if (ret)
		goto err_unlock;

	rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_mr:
	if (mr)
		rxe_put(mr);
err_drop_mw:
	rxe_put(mw);
err:
	return ret;
}

static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	/* o10-37.2.26 */
	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_put(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);
	rxe_put(mr);

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	if (rkey != mw->rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_ref:
	rxe_put(mw);
err:
	return ret;
}

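/*
 * rxe_lookup_mw() resolves the MW referenced by an rkey in an incoming
 * request. On success it returns the MW with a reference held; it returns
 * NULL if the rkey, PD, bound length, requested access, MW state or (for a
 * type 2 MW) the QP do not match.
 */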
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) || ((access & mw->access) != access) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_put(mw);
		return NULL;
	}

	return mw;
}

void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
	struct rxe_pd *pd = to_rpd(mw->ibmw.pd);

	rxe_put(pd);

	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_put(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_put(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}