/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"

/* not supported currently */
static int srq_signature;

/* Return a pointer to the n-th receive WQE in the SRQ buffer. */
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
        return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

/*
 * Translate a low-level mlx5 SRQ event into the corresponding ib_event
 * and forward it to the consumer's event handler, if one is registered.
 */
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
        struct ib_event event;
        struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

        if (ibsrq->event_handler) {
                event.device = ibsrq->device;
                event.element.srq = ibsrq;
                switch (type) {
                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
                                type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}

static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                           struct mlx5_srq_attr *in,
                           struct ib_udata *udata, int buf_size)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_create_srq ucmd = {};
        size_t ucmdlen;
        int err;
        int npages;
        int page_shift;
        int ncont;
        u32 offset;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;

        ucmdlen = min(udata->inlen, sizeof(ucmd));

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
                mlx5_ib_dbg(dev, "failed copy udata\n");
                return -EFAULT;
        }

        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;

        /* Reject trailing garbage beyond the command we understand. */
        if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
                                 udata->inlen - sizeof(ucmd)))
                return -EINVAL;

        if (in->type != IB_SRQT_BASIC) {
                err = get_srq_user_index(to_mucontext(pd->uobject->context),
                                         &ucmd, udata->inlen, &uidx);
                if (err)
                        return err;
        }

        srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

        srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
                                0, 0);
        if (IS_ERR(srq->umem)) {
                mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
                err = PTR_ERR(srq->umem);
                return err;
        }

        mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
                           &page_shift, &ncont, NULL);
        err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
                                     &offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }

        in->pas = kvcalloc(ncont, sizeof(*in->pas), GFP_KERNEL);
        if (!in->pas) {
                err = -ENOMEM;
                goto err_umem;
        }

        mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

        err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
                                  ucmd.db_addr, &srq->db);
        if (err) {
                mlx5_ib_dbg(dev, "map doorbell failed\n");
                goto err_in;
        }

        in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        in->page_offset = offset;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type != IB_SRQT_BASIC)
                in->user_index = uidx;

        return 0;

err_in:
        kvfree(in->pas);

err_umem:
        ib_umem_release(srq->umem);

        return err;
}
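
/*
 * Kernel-path SRQ creation.  Unlike create_srq_user(), which pins a
 * buffer that userspace already allocated, this path allocates the WQE
 * buffer and doorbell record in the kernel and then threads every WQE
 * onto a free list through next_wqe_index, so completed WQEs can be
 * recycled in any order by mlx5_ib_free_srq_wqe().
 */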
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                             struct mlx5_srq_attr *in, int buf_size)
{
        int err;
        int i;
        struct mlx5_wqe_srq_next_seg *next;

        err = mlx5_db_alloc(dev->mdev, &srq->db);
        if (err) {
                mlx5_ib_warn(dev, "alloc dbell rec failed\n");
                return err;
        }

        if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
                goto err_db;
        }

        srq->head = 0;
        srq->tail = srq->msrq.max - 1;
        srq->wqe_ctr = 0;

        /* Chain all WQEs into a circular free list: WQE i points to i + 1. */
        for (i = 0; i < srq->msrq.max; i++) {
                next = get_wqe(srq, i);
                next->next_wqe_index =
                        cpu_to_be16((i + 1) & (srq->msrq.max - 1));
        }

        mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
        in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
        if (!in->pas) {
                err = -ENOMEM;
                goto err_buf;
        }
        mlx5_fill_page_array(&srq->buf, in->pas);

        srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
        if (!srq->wrid) {
                err = -ENOMEM;
                goto err_in;
        }
        srq->wq_sig = !!srq_signature;

        in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type != IB_SRQT_BASIC)
                in->user_index = MLX5_IB_DEFAULT_UIDX;

        return 0;

err_in:
        kvfree(in->pas);

err_buf:
        mlx5_buf_free(dev->mdev, &srq->buf);

err_db:
        mlx5_db_free(dev->mdev, &srq->db);
        return err;
}

static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
        mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
        ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
        kvfree(srq->wrid);
        mlx5_buf_free(dev->mdev, &srq->buf);
        mlx5_db_free(dev->mdev, &srq->db);
}
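
/*
 * Each receive WQE consists of a 16-byte mlx5_wqe_srq_next_seg header
 * followed by one 16-byte mlx5_wqe_data_seg per scatter entry, rounded
 * up to a power of two (minimum 32 bytes) so the buffer can be indexed
 * by wqe_shift = log2(desc_size).  For example, max_sge = 3 gives
 * desc_size = 16 + 3 * 16 = 64, wqe_shift = 6, and
 * max_avail_gather = (64 - 16) / 16 = 3.  The intermediate checks
 * below guard against integer overflow for huge max_wr/max_sge values.
 */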
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                                  struct ib_srq_init_attr *init_attr,
                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_srq *srq;
        size_t desc_size;
        size_t buf_size;
        int err;
        struct mlx5_srq_attr in = {0};
        __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

        /* Sanity check SRQ size before proceeding */
        if (init_attr->attr.max_wr >= max_srq_wqes) {
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
                            max_srq_wqes);
                return ERR_PTR(-EINVAL);
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->msrq.max_gs = init_attr->attr.max_sge;

        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
        if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
                err = -EINVAL;
                goto err_srq;
        }
        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(size_t, 32, desc_size);
        if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
                err = -EINVAL;
                goto err_srq;
        }
        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                                     sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
        if (buf_size < desc_size) {
                err = -EINVAL;
                goto err_srq;
        }
        in.type = init_attr->srq_type;

        if (pd->uobject)
                err = create_srq_user(pd, srq, &in, udata, buf_size);
        else
                err = create_srq_kernel(dev, srq, &in, buf_size);

        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
                             pd->uobject ? "user" : "kernel", err);
                goto err_srq;
        }

        in.log_size = ilog2(srq->msrq.max);
        /* SRQ context encodes the stride in 16-byte units: log2(desc_size) - 4 */
        in.wqe_shift = srq->msrq.wqe_shift - 4;
        if (srq->wq_sig)
                in.flags |= MLX5_SRQ_FLAG_WQ_SIG;

        if (init_attr->srq_type == IB_SRQT_XRC)
                in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
        else
                in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;

        if (init_attr->srq_type == IB_SRQT_TM) {
                in.tm_log_list_size =
                        ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
                if (in.tm_log_list_size >
                    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
                        mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
                        err = -EINVAL;
                        goto err_usr_kern_srq;
                }
                in.flags |= MLX5_SRQ_FLAG_RNDV;
        }

        if (ib_srq_has_cq(init_attr->srq_type))
                in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
        else
                in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;

        in.pd = to_mpd(pd)->pdn;
        in.db_record = srq->db.dma;
        err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
        kvfree(in.pas);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
                goto err_usr_kern_srq;
        }

        mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

        srq->msrq.event = mlx5_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

        if (pd->uobject)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
                        mlx5_ib_dbg(dev, "copy to user failed\n");
                        err = -EFAULT;
                        goto err_core;
                }

        init_attr->attr.max_wr = srq->msrq.max - 1;

        return &srq->ibsrq;

err_core:
        mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
        if (pd->uobject)
                destroy_srq_user(pd, srq);
        else
                destroy_srq_kernel(dev, srq);

err_srq:
        kfree(srq);

        return ERR_PTR(err);
}
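
/*
 * A minimal usage sketch (illustrative, not part of this driver): a
 * kernel ULP reaches mlx5_ib_create_srq() through the core verbs API,
 * roughly as follows.
 *
 *      struct ib_srq_init_attr attr = {
 *              .srq_type = IB_SRQT_BASIC,
 *              .attr = {
 *                      .max_wr  = 1024,
 *                      .max_sge = 2,
 *              },
 *      };
 *      struct ib_srq *srq = ib_create_srq(pd, &attr);
 *
 * On success attr.max_wr is updated to the real capacity (msrq.max - 1;
 * one WQE stays reserved so the head == tail test in
 * mlx5_ib_post_srq_recv() can distinguish a full free list from an
 * empty one).
 */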

int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;

        /* We don't support resizing SRQs yet */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                if (attr->srq_limit >= srq->msrq.max)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
        }

        return 0;
}

int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;
        struct mlx5_srq_attr *out;

        out = kzalloc(sizeof(*out), GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
        if (ret)
                goto out_box;

        srq_attr->srq_limit = out->lwm;
        srq_attr->max_wr = srq->msrq.max - 1;
        srq_attr->max_sge = srq->msrq.max_gs;

out_box:
        kfree(out);
        return ret;
}

int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
        struct mlx5_ib_dev *dev = to_mdev(srq->device);
        struct mlx5_ib_srq *msrq = to_msrq(srq);

        mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

        if (srq->uobject) {
                mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
                ib_umem_release(msrq->umem);
        } else {
                destroy_srq_kernel(dev, msrq);
        }

        kfree(srq);
        return 0;
}

/* Return a completed WQE to the free list; called from CQ poll context. */
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
        struct mlx5_wqe_srq_next_seg *next;

        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        next = get_wqe(srq, srq->tail);
        next->next_wqe_index = cpu_to_be16(wqe_index);
        srq->tail = wqe_index;

        spin_unlock(&srq->lock);
}

int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                          const struct ib_recv_wr **bad_wr)
{
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        struct mlx5_wqe_srq_next_seg *next;
        struct mlx5_wqe_data_seg *scat;
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_core_dev *mdev = dev->mdev;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                err = -EIO;
                *bad_wr = wr;
                goto out;
        }

        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                /* head == tail means the free list is exhausted */
                if (unlikely(srq->head == srq->tail)) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                srq->wrid[srq->head] = wr->wr_id;

                next = get_wqe(srq, srq->head);
                srq->head = be16_to_cpu(next->next_wqe_index);
                scat = (struct mlx5_wqe_data_seg *)(next + 1);

                for (i = 0; i < wr->num_sge; i++) {
                        scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
                        scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
                        scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
                }

                /* Terminate a short scatter list with an invalid-lkey entry. */
                if (i < srq->msrq.max_avail_gather) {
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
                        scat[i].addr       = 0;
                }
        }

        if (likely(nreq)) {
                srq->wqe_ctr += nreq;

                /* Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                *srq->db.db = cpu_to_be32(srq->wqe_ctr);
        }
out:
        spin_unlock_irqrestore(&srq->lock, flags);

        return err;
}