/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"

/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
	spin_lock_init(&rdi->n_srqs_lock);
	rdi->n_srqs_allocated = 0;
}

/**
 * rvt_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: Allocated srq object
 */
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
	struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct rvt_ucontext, ibucontext);
	struct rvt_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-EOPNOTSUPP);

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return ERR_PTR(-EINVAL);

	srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
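	/*
	 * The ring is sized with one extra slot so that up to max_wr WQEs
	 * can be posted while head == tail still means "empty";
	 * rvt_query_srq() reports size - 1 back to the caller as max_wr.
	 */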
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
	srq->rq.wq = udata ?
		vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
		vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
			     dev->dparms.node);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip =
			rvt_create_mmap_info(dev, s, &ucontext->ibucontext,
					     srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	}

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->limit = srq_init_attr->attr.srq_limit;

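	/*
	 * Charge this SRQ against the per-device limit; the counter is
	 * protected by n_srqs_lock and is dropped again in
	 * rvt_destroy_srq().
	 */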
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return &srq->ibsrq;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
	return ret;
}

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_rwq *owq;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;

		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = udata ?
			vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
			vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
				     dev->dparms.node);
		if (!wq)
			return -ENOMEM;

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
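		/*
		 * Copy the outstanding WQEs, oldest (tail) first, from the
		 * old ring into the start of the new one; head and tail are
		 * reset below so the new ring starts with n entries posted.
		 */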
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
	return ret;
}

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 *
 * Return: always 0
 */
int rvt_destroy_srq(struct ib_srq *ibsrq)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}