1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/module.h> 34 #include <linux/mlx5/qp.h> 35 #include <linux/mlx5/srq.h> 36 #include <linux/slab.h> 37 #include <rdma/ib_umem.h> 38 #include <rdma/ib_user_verbs.h> 39 40 #include "mlx5_ib.h" 41 #include "user.h" 42 43 /* not supported currently */ 44 static int srq_signature; 45 46 static void *get_wqe(struct mlx5_ib_srq *srq, int n) 47 { 48 return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); 49 } 50 51 static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) 52 { 53 struct ib_event event; 54 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; 55 56 if (ibsrq->event_handler) { 57 event.device = ibsrq->device; 58 event.element.srq = ibsrq; 59 switch (type) { 60 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: 61 event.event = IB_EVENT_SRQ_LIMIT_REACHED; 62 break; 63 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: 64 event.event = IB_EVENT_SRQ_ERR; 65 break; 66 default: 67 pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n", 68 type, srq->srqn); 69 return; 70 } 71 72 ibsrq->event_handler(&event, ibsrq->srq_context); 73 } 74 } 75 76 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, 77 struct mlx5_create_srq_mbox_in **in, 78 struct ib_udata *udata, int buf_size, int *inlen) 79 { 80 struct mlx5_ib_dev *dev = to_mdev(pd->device); 81 struct mlx5_ib_create_srq ucmd; 82 size_t ucmdlen; 83 int err; 84 int npages; 85 int page_shift; 86 int ncont; 87 u32 offset; 88 89 ucmdlen = 90 (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < 91 sizeof(ucmd)) ? (sizeof(ucmd) - 92 sizeof(ucmd.reserved)) : sizeof(ucmd); 93 94 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { 95 mlx5_ib_dbg(dev, "failed copy udata\n"); 96 return -EFAULT; 97 } 98 99 if (ucmdlen == sizeof(ucmd) && 100 ucmd.reserved != 0) 101 return -EINVAL; 102 103 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); 104 105 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, 106 0, 0); 107 if (IS_ERR(srq->umem)) { 108 mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size); 109 err = PTR_ERR(srq->umem); 110 return err; 111 } 112 113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, 114 &page_shift, &ncont, NULL); 115 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, 116 &offset); 117 if (err) { 118 mlx5_ib_warn(dev, "bad offset\n"); 119 goto err_umem; 120 } 121 122 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; 123 *in = mlx5_vzalloc(*inlen); 124 if (!(*in)) { 125 err = -ENOMEM; 126 goto err_umem; 127 } 128 129 mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); 130 131 err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context), 132 ucmd.db_addr, &srq->db); 133 if (err) { 134 mlx5_ib_dbg(dev, "map doorbell failed\n"); 135 goto err_in; 136 } 137 138 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 139 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); 140 141 return 0; 142 143 err_in: 144 kvfree(*in); 145 146 err_umem: 147 ib_umem_release(srq->umem); 148 149 return err; 150 } 151 152 static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, 153 struct mlx5_create_srq_mbox_in **in, int buf_size, 154 int *inlen) 155 { 156 int err; 157 int i; 158 struct mlx5_wqe_srq_next_seg *next; 159 int page_shift; 160 int npages; 161 162 err = mlx5_db_alloc(dev->mdev, &srq->db); 163 if (err) { 164 mlx5_ib_warn(dev, "alloc dbell rec failed\n"); 165 return err; 166 } 167 168 if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) { 169 mlx5_ib_dbg(dev, "buf alloc failed\n"); 170 err = -ENOMEM; 171 goto err_db; 172 } 173 page_shift = srq->buf.page_shift; 174 175 srq->head = 0; 176 srq->tail = srq->msrq.max - 1; 177 srq->wqe_ctr = 0; 178 179 for (i = 0; i < srq->msrq.max; i++) { 180 next = get_wqe(srq, i); 181 next->next_wqe_index = 182 cpu_to_be16((i + 1) & (srq->msrq.max - 1)); 183 } 184 185 npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT)); 186 mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n", 187 buf_size, page_shift, srq->buf.npages, npages); 188 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages; 189 *in = mlx5_vzalloc(*inlen); 190 if (!*in) { 191 err = -ENOMEM; 192 goto err_buf; 193 } 194 mlx5_fill_page_array(&srq->buf, (*in)->pas); 195 196 srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL); 197 if (!srq->wrid) { 198 mlx5_ib_dbg(dev, "kmalloc failed %lu\n", 199 (unsigned long)(srq->msrq.max * sizeof(u64))); 200 err = -ENOMEM; 201 goto err_in; 202 } 203 srq->wq_sig = !!srq_signature; 204 205 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 206 207 return 0; 208 209 err_in: 210 kvfree(*in); 211 212 err_buf: 213 mlx5_buf_free(dev->mdev, &srq->buf); 214 215 err_db: 216 mlx5_db_free(dev->mdev, &srq->db); 217 return err; 218 } 219 220 static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq) 221 { 222 mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); 223 ib_umem_release(srq->umem); 224 } 225 226 227 static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) 228 { 229 kfree(srq->wrid); 230 mlx5_buf_free(dev->mdev, &srq->buf); 231 mlx5_db_free(dev->mdev, &srq->db); 232 } 233 234 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, 235 struct ib_srq_init_attr *init_attr, 236 struct ib_udata *udata) 237 { 238 struct mlx5_ib_dev *dev = to_mdev(pd->device); 239 struct mlx5_ib_srq *srq; 240 int desc_size; 241 int buf_size; 242 int err; 243 struct mlx5_create_srq_mbox_in *uninitialized_var(in); 244 int uninitialized_var(inlen); 245 int is_xrc; 246 u32 flgs, xrcdn; 247 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 248 249 /* Sanity check SRQ size before proceeding */ 250 if (init_attr->attr.max_wr >= max_srq_wqes) { 251 mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", 252 init_attr->attr.max_wr, 253 max_srq_wqes); 254 return ERR_PTR(-EINVAL); 255 } 256 257 srq = kmalloc(sizeof(*srq), GFP_KERNEL); 258 if (!srq) 259 return ERR_PTR(-ENOMEM); 260 261 mutex_init(&srq->mutex); 262 spin_lock_init(&srq->lock); 263 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); 264 srq->msrq.max_gs = init_attr->attr.max_sge; 265 266 desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + 267 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); 268 desc_size = roundup_pow_of_two(desc_size); 269 desc_size = max_t(int, 32, desc_size); 270 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / 271 sizeof(struct mlx5_wqe_data_seg); 272 srq->msrq.wqe_shift = ilog2(desc_size); 273 buf_size = srq->msrq.max * desc_size; 274 mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", 275 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, 276 srq->msrq.max_avail_gather); 277 278 if (pd->uobject) 279 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); 280 else 281 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); 282 283 if (err) { 284 mlx5_ib_warn(dev, "create srq %s failed, err %d\n", 285 pd->uobject ? "user" : "kernel", err); 286 goto err_srq; 287 } 288 289 is_xrc = (init_attr->srq_type == IB_SRQT_XRC); 290 in->ctx.state_log_sz = ilog2(srq->msrq.max); 291 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; 292 xrcdn = 0; 293 if (is_xrc) { 294 xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn; 295 in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn); 296 } else if (init_attr->srq_type == IB_SRQT_BASIC) { 297 xrcdn = to_mxrcd(dev->devr.x0)->xrcdn; 298 in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn); 299 } 300 301 in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF)); 302 303 in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); 304 in->ctx.db_record = cpu_to_be64(srq->db.dma); 305 err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc); 306 kvfree(in); 307 if (err) { 308 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); 309 goto err_usr_kern_srq; 310 } 311 312 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); 313 314 srq->msrq.event = mlx5_ib_srq_event; 315 srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; 316 317 if (pd->uobject) 318 if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { 319 mlx5_ib_dbg(dev, "copy to user failed\n"); 320 err = -EFAULT; 321 goto err_core; 322 } 323 324 init_attr->attr.max_wr = srq->msrq.max - 1; 325 326 return &srq->ibsrq; 327 328 err_core: 329 mlx5_core_destroy_srq(dev->mdev, &srq->msrq); 330 331 err_usr_kern_srq: 332 if (pd->uobject) 333 destroy_srq_user(pd, srq); 334 else 335 destroy_srq_kernel(dev, srq); 336 337 err_srq: 338 kfree(srq); 339 340 return ERR_PTR(err); 341 } 342 343 int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, 344 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) 345 { 346 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); 347 struct mlx5_ib_srq *srq = to_msrq(ibsrq); 348 int ret; 349 350 /* We don't support resizing SRQs yet */ 351 if (attr_mask & IB_SRQ_MAX_WR) 352 return -EINVAL; 353 354 if (attr_mask & IB_SRQ_LIMIT) { 355 if (attr->srq_limit >= srq->msrq.max) 356 return -EINVAL; 357 358 mutex_lock(&srq->mutex); 359 ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1); 360 mutex_unlock(&srq->mutex); 361 362 if (ret) 363 return ret; 364 } 365 366 return 0; 367 } 368 369 int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) 370 { 371 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); 372 struct mlx5_ib_srq *srq = to_msrq(ibsrq); 373 int ret; 374 struct mlx5_query_srq_mbox_out *out; 375 376 out = kzalloc(sizeof(*out), GFP_KERNEL); 377 if (!out) 378 return -ENOMEM; 379 380 ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out); 381 if (ret) 382 goto out_box; 383 384 srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm); 385 srq_attr->max_wr = srq->msrq.max - 1; 386 srq_attr->max_sge = srq->msrq.max_gs; 387 388 out_box: 389 kfree(out); 390 return ret; 391 } 392 393 int mlx5_ib_destroy_srq(struct ib_srq *srq) 394 { 395 struct mlx5_ib_dev *dev = to_mdev(srq->device); 396 struct mlx5_ib_srq *msrq = to_msrq(srq); 397 398 mlx5_core_destroy_srq(dev->mdev, &msrq->msrq); 399 400 if (srq->uobject) { 401 mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); 402 ib_umem_release(msrq->umem); 403 } else { 404 destroy_srq_kernel(dev, msrq); 405 } 406 407 kfree(srq); 408 return 0; 409 } 410 411 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) 412 { 413 struct mlx5_wqe_srq_next_seg *next; 414 415 /* always called with interrupts disabled. */ 416 spin_lock(&srq->lock); 417 418 next = get_wqe(srq, srq->tail); 419 next->next_wqe_index = cpu_to_be16(wqe_index); 420 srq->tail = wqe_index; 421 422 spin_unlock(&srq->lock); 423 } 424 425 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 426 struct ib_recv_wr **bad_wr) 427 { 428 struct mlx5_ib_srq *srq = to_msrq(ibsrq); 429 struct mlx5_wqe_srq_next_seg *next; 430 struct mlx5_wqe_data_seg *scat; 431 unsigned long flags; 432 int err = 0; 433 int nreq; 434 int i; 435 436 spin_lock_irqsave(&srq->lock, flags); 437 438 for (nreq = 0; wr; nreq++, wr = wr->next) { 439 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { 440 err = -EINVAL; 441 *bad_wr = wr; 442 break; 443 } 444 445 if (unlikely(srq->head == srq->tail)) { 446 err = -ENOMEM; 447 *bad_wr = wr; 448 break; 449 } 450 451 srq->wrid[srq->head] = wr->wr_id; 452 453 next = get_wqe(srq, srq->head); 454 srq->head = be16_to_cpu(next->next_wqe_index); 455 scat = (struct mlx5_wqe_data_seg *)(next + 1); 456 457 for (i = 0; i < wr->num_sge; i++) { 458 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); 459 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); 460 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); 461 } 462 463 if (i < srq->msrq.max_avail_gather) { 464 scat[i].byte_count = 0; 465 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); 466 scat[i].addr = 0; 467 } 468 } 469 470 if (likely(nreq)) { 471 srq->wqe_ctr += nreq; 472 473 /* Make sure that descriptors are written before 474 * doorbell record. 475 */ 476 wmb(); 477 478 *srq->db.db = cpu_to_be32(srq->wqe_ctr); 479 } 480 481 spin_unlock_irqrestore(&srq->lock, flags); 482 483 return err; 484 } 485