// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
 */

#include "rxe.h"

int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_add_ref(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_drop_ref(pd);
		return ret;
	}

	rxe_add_index(mw);
	/* the rkey carries the pool index in its upper 24 bits and an
	 * 8-bit variable key in the low byte
	 */
	ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	return 0;
}

static void rxe_do_dealloc_mw(struct rxe_mw *mw)
{
	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_drop_ref(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_drop_ref(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}

int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	unsigned long flags;

	spin_lock_irqsave(&mw->lock, flags);
	rxe_do_dealloc_mw(mw);
	spin_unlock_irqrestore(&mw->lock, flags);

	rxe_drop_ref(mw);
	rxe_drop_ref(pd);

	return 0;
}
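
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch
 * of how the alloc/dealloc verbs above are reached, assuming libibverbs.
 * The pd variable and error handling are hypothetical.
 *
 *	struct ibv_mw *mw;
 *
 *	mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);	// reaches rxe_alloc_mw()
 *	if (!mw)
 *		return -errno;
 *
 *	// a type 2 MW starts in the FREE state; its rkey (mw->rkey)
 *	// cannot be used remotely until the window has been bound
 *
 *	ibv_dealloc_mw(mw);			// reaches rxe_dealloc_mw()
 */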

static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_mw *mw, struct rxe_mr *mr)
{
	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			pr_err_once(
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		/* o10-36.2.2 */
		if (unlikely((mw->access & IB_ZERO_BASED))) {
			pr_err_once("attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		/* o10-37.2.30 */
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			pr_err_once(
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		/* C10-72 */
		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			pr_err_once(
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		/* o10-37.2.40 */
		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			pr_err_once(
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	if (unlikely((wqe->wr.wr.mw.rkey & 0xff) == (mw->ibmw.rkey & 0xff))) {
		pr_err_once("attempt to bind MW with same key\n");
		return -EINVAL;
	}

	/* remaining checks only apply to a nonzero MR */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		pr_err_once("attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	/* C10-73 */
	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		pr_err_once(
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	/* C10-74 */
	if (unlikely((mw->access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		pr_err_once(
			"attempt to bind a writeable MW to an MR without local write access\n");
		return -EINVAL;
	}

	/* C10-75 */
	if (mw->access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
			pr_err_once(
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->iova + mr->length)))) {
			pr_err_once(
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 rkey;
	u32 new_rkey;

	/* keep the pool index in the upper bits, take the new 8-bit
	 * variable key supplied by the consumer
	 */
	rkey = mw->ibmw.rkey;
	new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.mw.rkey & 0x000000ff);

	mw->ibmw.rkey = new_rkey;
	mw->access = wqe->wr.wr.mw.access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	if (mw->mr) {
		/* drop the reference only after the last use of the MR
		 * pointer, matching the order in rxe_do_dealloc_mw();
		 * dropping first could free the MR under us
		 */
		atomic_dec(&mw->mr->num_mw);
		rxe_drop_ref(mw->mr);
		mw->mr = NULL;
	}

	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_add_ref(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_add_ref(qp);
		mw->qp = qp;
	}
}

int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int ret;
	struct rxe_mw *mw;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	unsigned long flags;

	mw = rxe_pool_get_index(&rxe->mw_pool,
				wqe->wr.wr.mw.mw_rkey >> 8);
	if (unlikely(!mw)) {
		ret = -EINVAL;
		goto err;
	}

	if (unlikely(mw->ibmw.rkey != wqe->wr.wr.mw.mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool,
					wqe->wr.wr.mw.mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}

		if (unlikely(mr->ibmr.lkey != wqe->wr.wr.mw.mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	} else {
		mr = NULL;
	}

	spin_lock_irqsave(&mw->lock, flags);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
	if (ret)
		goto err_unlock;

	rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
	spin_unlock_irqrestore(&mw->lock, flags);
err_drop_mr:
	if (mr)
		rxe_drop_ref(mr);
err_drop_mw:
	rxe_drop_ref(mw);
err:
	return ret;
}

static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	/* o10-37.2.26 */
	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_drop_ref(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);
	rxe_drop_ref(mr);

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	unsigned long flags;
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	if (rkey != mw->ibmw.rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_irqsave(&mw->lock, flags);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_irqrestore(&mw->lock, flags);
err_drop_ref:
	rxe_drop_ref(mw);
err:
	return ret;
}
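
/*
 * Illustrative only, not part of the driver: a hedged userspace sketch of
 * the type 2 bind and invalidate paths above, assuming libibverbs. The qp,
 * mr, mw, buf and len names are hypothetical.
 *
 *	struct ibv_send_wr wr = {}, inv = {}, *bad_wr;
 *	uint32_t new_rkey = ibv_inc_rkey(mw->rkey);	// new low-byte key,
 *							// must differ from the
 *							// current one
 *
 *	wr.opcode = IBV_WR_BIND_MW;
 *	wr.send_flags = IBV_SEND_SIGNALED;
 *	wr.bind_mw.mw = mw;
 *	wr.bind_mw.rkey = new_rkey;
 *	wr.bind_mw.bind_info.mr = mr;
 *	wr.bind_mw.bind_info.addr = (uintptr_t)buf;
 *	wr.bind_mw.bind_info.length = len;
 *	wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_WRITE;
 *	ibv_post_send(qp, &wr, &bad_wr);	// reaches rxe_bind_mw()
 *
 *	// later, to return the window to the FREE state:
 *	inv.opcode = IBV_WR_LOCAL_INV;
 *	inv.send_flags = IBV_SEND_SIGNALED;
 *	inv.invalidate_rkey = new_rkey;
 *	ibv_post_send(qp, &inv, &bad_wr);	// reaches rxe_invalidate_mw()
 */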

struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((rxe_mw_rkey(mw) != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) ||
		     (access && !(access & mw->access)) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_drop_ref(mw);
		return NULL;
	}

	return mw;
}

void rxe_mw_cleanup(struct rxe_pool_entry *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);

	rxe_drop_index(mw);
}
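
/*
 * Illustrative only: on the remote peer, a bound window's rkey is used
 * like any MR rkey; rxe_lookup_mw() above is what validates it on the
 * responder side. A hedged libibverbs sketch, with local_mr, local_buf,
 * mw_addr and mw_rkey hypothetical (the latter two learned out of band
 * from the peer that performed the bind):
 *
 *	struct ibv_sge sge = {
 *		.addr = (uintptr_t)local_buf,
 *		.length = len,
 *		.lkey = local_mr->lkey,
 *	};
 *	struct ibv_send_wr wr = {}, *bad_wr;
 *
 *	wr.opcode = IBV_WR_RDMA_WRITE;
 *	wr.send_flags = IBV_SEND_SIGNALED;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.wr.rdma.remote_addr = mw_addr;	// within the bound range
 *	wr.wr.rdma.rkey = mw_rkey;		// rkey produced by the bind
 *	ibv_post_send(qp, &wr, &bad_wr);
 */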