/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			      struct ib_udata *uhw)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->device_cap_flags = mdev->device_cap_flags;
	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = mdev->limits.page_size_cap;
	props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr = mdev->limits.max_wqes;
	props->max_send_sge = mdev->limits.max_sg;
	props->max_recv_sge = mdev->limits.max_sg;
	props->max_sge_rd = mdev->limits.max_sg;
	props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe = mdev->limits.max_cqes;
	props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr = mdev->limits.max_srq_wqes;
	props->max_srq_sge = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
	props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = mdev->limits.pkey_table_len;
	props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	err = 0;
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_port(struct ib_device *ibdev,
			    u32 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props being zeroed by the caller, avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}
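
/*
 * Note on the capability-mask update below (added for clarity): the mask
 * pushed to the firmware via mthca_SET_IB() is derived as
 *
 *	cap_mask = (current port caps | set_port_cap_mask) & ~clr_port_cap_mask
 *
 * and the whole read-modify-write is serialized by cap_mask_mutex so that
 * concurrent modify_port() calls cannot lose each other's updates.
 */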

static int mthca_modify_port(struct ib_device *ibdev,
			     u32 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
	if (err)
		goto out;
out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
			    u32 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u32 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
				struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mthca_alloc_ucontext_resp uresp = {};
	struct mthca_ucontext *context = to_mucontext(uctx);
	int err;

	if (!(to_mdev(ibdev)->active))
		return -EAGAIN;

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err)
		return err;

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		return err;
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		return -EFAULT;
	}

	context->reg_mr_warned = 0;

	return 0;
}

static void mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
}

static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct mthca_pd *pd = to_mpd(ibpd);
	int err;

	err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
	if (err)
		return err;

	if (udata) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			return -EFAULT;
		}
	}

	return 0;
}

static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	return 0;
}

static int mthca_ah_create(struct ib_ah *ibah,
			   struct rdma_ah_init_attr *init_attr,
			   struct ib_udata *udata)

{
	struct mthca_ah *ah = to_mah(ibah);

	return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
			       init_attr->ah_attr, ah);
}

static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	return 0;
}

static int mthca_create_srq(struct ib_srq *ibsrq,
			    struct ib_srq_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int err;

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);

		if (err)
			return err;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
			      &init_attr->attr, srq, udata);

	if (err && udata)
		mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		return err;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
		mthca_free_srq(to_mdev(ibsrq->device), srq);
		return -EFAULT;
	}

	return 0;
}

static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	return 0;
}

static int mthca_create_qp(struct ib_qp *ibqp,
			   struct ib_qp_init_attr *init_attr,
			   struct ib_udata *udata)
{
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_dev *dev = to_mdev(ibqp->device);
	int err;

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		if (udata) {
			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
				return -EFAULT;

			err = mthca_map_user_db(dev, &context->uar,
						context->db_tab,
						ucmd.sq_db_index,
						ucmd.sq_db_page);
			if (err)
				return err;

			err = mthca_map_user_db(dev, &context->uar,
						context->db_tab,
						ucmd.rq_db_index,
						ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(dev, &context->uar,
						    context->db_tab,
						    ucmd.sq_db_index);
				return err;
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index = ucmd.sq_db_index;
			qp->rq.db_index = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(dev, to_mpd(ibqp->pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp, udata);

		if (err && udata) {
			mthca_unmap_user_db(dev, &context->uar, context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(dev, &context->uar, context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
		if (!qp->sqp)
			return -ENOMEM;

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(dev, to_mpd(ibqp->pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num, qp,
				      udata);
		break;
	}
	default:
		/* Don't support raw QPs */
		return -EOPNOTSUPP;
	}

	if (err) {
		kfree(qp->sqp);
		return err;
	}

	init_attr->cap.max_send_wr = qp->sq.max;
	init_attr->cap.max_recv_wr = qp->rq.max;
	init_attr->cap.max_send_sge = qp->sq.max_gs;
	init_attr->cap.max_recv_sge = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return 0;
}

static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(qp->device),
				    &context->uar,
				    context->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &context->uar,
				    context->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(to_mqp(qp)->sqp);
	return 0;
}
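
/*
 * CQ sizing note (added for clarity): mthca CQs are sized in powers of two.
 * The rounding loop in mthca_create_cq() finds the smallest power of two
 * strictly greater than the requested "entries" (e.g. a request for 100
 * CQEs allocates nent = 128), and the resize path below likewise uses
 * roundup_pow_of_two(entries + 1) and reports entries - 1 back through
 * ibcq->cqe.
 */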

static int mthca_create_cq(struct ib_cq *ibcq,
			   const struct ib_cq_init_attr *attr,
			   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);

	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return -EINVAL;

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
					context->db_tab, ucmd.set_db_index,
					ucmd.set_db_page);
		if (err)
			return err;

		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
					context->db_tab, ucmd.arm_db_index,
					ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = to_mcq(ibcq);

	if (udata) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index = ucmd.set_db_index;
		cq->arm_db_index = ucmd.arm_db_index;
	}

	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent, context,
			    udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_unmap_arm;

	if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;
		goto err_unmap_arm;
	}

	cq->resize_buf = NULL;

	return 0;

err_unmap_arm:
	if (udata)
		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
				    context->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (udata)
		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
				    context->db_tab, ucmd.set_db_index);

	return err;
}

static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}

static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf = cq->buf;
			tcqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(cq->device),
				    &context->uar,
				    context->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &context->uar,
				    context->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	return 0;
}

static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}
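
/*
 * MTT write batching note (added for clarity): mthca_reg_user_mr() walks the
 * pinned umem one PAGE_SIZE DMA block at a time, collects the block addresses
 * in a single scratch page, and flushes them to the hardware MTT whenever
 * min(mthca_write_mtt_size(dev), PAGE_SIZE / sizeof(u64)) entries have
 * accumulated -- with 4 KiB pages that is at most 512 addresses per
 * mthca_write_mtt() call.
 */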

static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct ib_block_iter biter;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int n, i;
	int err = 0;
	int write_mtt_size;

	if (udata->inlen < sizeof ucmd) {
		if (!context->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   current->comm);
			mthca_warn(dev, " Update libmthca to fix this.\n");
		}
		++context->reg_mr_warned;
		ucmd.mr_attrs = 0;
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->device, start, length, acc);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
		pages[i++] = rdma_block_iter_dma_address(&biter);

		/*
		 * Be friendly to write_mtt and pass it chunks
		 * of appropriate size.
		 */
		if (i == write_mtt_size) {
			err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
			if (err)
				goto mtt_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
			     convert_access(acc), mr);

	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sysfs_emit(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static const char *hca_type_string(int hca_type)
{
	switch (hca_type) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return "MT23108";
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return "MT25208 (MT23108 compat mode)";
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return "MT25208";
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return "MT25204";
	}

	return "unknown";
}

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sysfs_emit(buf, "%s\n", hca_type_string(dev->pdev->device));
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sysfs_emit(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group mthca_attr_group = {
	.attrs = mthca_dev_attributes,
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_port_immutable(struct ib_device *ibdev, u32 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void get_dev_fw_str(struct ib_device *device, char *str)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
		 (int) (dev->fw_ver >> 32),
		 (int) (dev->fw_ver >> 16) & 0xffff,
		 (int) dev->fw_ver & 0xffff);
}

static const struct ib_device_ops mthca_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MTHCA,
	.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION,
	.uverbs_no_driver_id_binding = 1,

	.alloc_pd = mthca_alloc_pd,
	.alloc_ucontext = mthca_alloc_ucontext,
	.attach_mcast = mthca_multicast_attach,
	.create_ah = mthca_ah_create,
	.create_cq = mthca_create_cq,
	.create_qp = mthca_create_qp,
	.dealloc_pd = mthca_dealloc_pd,
	.dealloc_ucontext = mthca_dealloc_ucontext,
	.dereg_mr = mthca_dereg_mr,
	.destroy_ah = mthca_ah_destroy,
	.destroy_cq = mthca_destroy_cq,
	.destroy_qp = mthca_destroy_qp,
	.detach_mcast = mthca_multicast_detach,
	.device_group = &mthca_attr_group,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mthca_get_dma_mr,
	.get_port_immutable = mthca_port_immutable,
	.mmap = mthca_mmap_uar,
	.modify_device = mthca_modify_device,
	.modify_port = mthca_modify_port,
	.modify_qp = mthca_modify_qp,
	.poll_cq = mthca_poll_cq,
	.process_mad = mthca_process_mad,
	.query_ah = mthca_ah_query,
	.query_device = mthca_query_device,
	.query_gid = mthca_query_gid,
	.query_pkey = mthca_query_pkey,
	.query_port = mthca_query_port,
	.query_qp = mthca_query_qp,
	.reg_user_mr = mthca_reg_user_mr,
	.resize_cq = mthca_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, mthca_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
};

static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_arbel_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_tavor_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_arbel_ops = {
	.post_recv = mthca_arbel_post_receive,
	.post_send = mthca_arbel_post_send,
	.req_notify_cq = mthca_arbel_arm_cq,
};

static const struct ib_device_ops mthca_dev_tavor_ops = {
	.post_recv = mthca_tavor_post_receive,
	.post_send = mthca_tavor_post_send,
	.req_notify_cq = mthca_tavor_arm_cq,
};
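
/*
 * Registration note (added for clarity): the common mthca_dev_ops table is
 * installed for every HCA, and the fast-path verbs (post_send, post_recv,
 * req_notify_cq) are then set from either the Arbel (mem-free) or Tavor
 * variants depending on mthca_is_memfree(); the SRQ ops are only added when
 * the device advertises MTHCA_FLAG_SRQ.
 */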

int mthca_register_device(struct mthca_dev *dev)
{
	int ret;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		if (mthca_is_memfree(dev))
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_arbel_srq_ops);
		else
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_tavor_srq_ops);
	}

	ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);

	if (mthca_is_memfree(dev))
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
	else
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);

	mutex_init(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
	if (ret)
		return ret;

	mthca_start_catas_poll(dev);

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}