/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method        = IB_MGMT_METHOD_GET;
}

static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			      struct ib_udata *uhw)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->device_cap_flags    = mdev->device_cap_flags;
	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size         = ~0ull;
	props->page_size_cap       = mdev->limits.page_size_cap;
	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr           = mdev->limits.max_wqes;
	props->max_send_sge        = mdev->limits.max_sg;
	props->max_recv_sge        = mdev->limits.max_sg;
	props->max_sge_rd          = mdev->limits.max_sg;
	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe             = mdev->limits.max_cqes;
	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr          = mdev->limits.max_srq_wqes;
	props->max_srq_sge         = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys           = mdev->limits.pkey_table_len;
	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	/*
	 * If Sinai memory key optimization is being used, then only
	 * the 8-bit key portion will change.  For other HCAs, the
	 * unused index bits will also be used for FMR remapping.
	 */
	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		props->max_map_per_fmr = 255;
	else
		props->max_map_per_fmr =
			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

	err = 0;
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
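
/*
 * Port attributes are read with a PortInfo subnet management query;
 * the byte offsets into out_mad->data below follow the IBA PortInfo
 * attribute layout (LID at byte 16, SM LID at byte 18, CapabilityMask
 * at byte 20, and so on), so they must not be reordered casually.
 */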
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props is zeroed by the caller; avoid zeroing it again here */

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc               = out_mad->data[34] & 0x7;
	props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl             = out_mad->data[36] & 0xf;
	props->state             = out_mad->data[32] & 0xf;
	props->phys_state        = out_mad->data[33] >> 4;
	props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz        = 0x80000000;
	props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width      = out_mad->data[31] & 0xf;
	props->active_speed      = out_mad->data[35] >> 4;
	props->max_mtu           = out_mad->data[41] & 0xf;
	props->active_mtu        = out_mad->data[36] >> 4;
	props->subnet_timeout    = out_mad->data[51] & 0x1f;
	props->max_vl_num        = out_mad->data[37] >> 4;
	props->init_type_reply   = out_mad->data[41] >> 4;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
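
/*
 * Only the node description can be modified through modify_device;
 * cap_mask_mutex serializes writers so that a concurrent
 * modify_device or modify_port cannot interleave with the
 * node_desc update.
 */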
static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
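
/*
 * Userspace context setup: every process gets its own UAR page, and
 * mem-free HCAs additionally get a per-context user doorbell table
 * that the later map_user_db/unmap_user_db calls index into.
 */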
static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
				struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mthca_alloc_ucontext_resp uresp = {};
	struct mthca_ucontext *context = to_mucontext(uctx);
	int err;

	if (!(to_mdev(ibdev)->active))
		return -EAGAIN;

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err)
		return err;

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		return err;
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		return -EFAULT;
	}

	context->reg_mr_warned = 0;

	return 0;
}

static void mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
}

static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct mthca_pd *pd = to_mpd(ibpd);
	int err;

	err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
	if (err)
		return err;

	if (udata) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			return -EFAULT;
		}
	}

	return 0;
}

static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
}

static int mthca_ah_create(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
			   u32 flags, struct ib_udata *udata)
{
	struct mthca_ah *ah = to_mah(ibah);

	return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd), ah_attr,
			       ah);
}

static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
}

static int mthca_create_srq(struct ib_srq *ibsrq,
			    struct ib_srq_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int err;

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);

		if (err)
			return err;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index     = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
			      &init_attr->attr, srq, udata);

	if (err && udata)
		mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		return err;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
		mthca_free_srq(to_mdev(ibsrq->device), srq);
		return -EFAULT;
	}

	return 0;
}
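
/*
 * Teardown mirrors mthca_create_srq: the user doorbell mapping taken
 * at create time is dropped before the SRQ itself is freed.
 */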
static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
}

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp;
	int err;

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		if (udata) {
			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
				kfree(qp);
				return ERR_PTR(-EFAULT);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.sq_db_index, ucmd.sq_db_page);
			if (err) {
				kfree(qp);
				return ERR_PTR(err);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.rq_db_index, ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(to_mdev(pd->device),
						    &context->uar,
						    context->db_tab,
						    ucmd.sq_db_index);
				kfree(qp);
				return ERR_PTR(err);
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index  = ucmd.sq_db_index;
			qp->rq.db_index  = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp, udata);

		if (err && udata) {
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Don't allow userspace to create special QPs */
		if (udata)
			return ERR_PTR(-EINVAL);

		qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp), udata);
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_send_wr     = qp->sq.max;
	init_attr->cap.max_recv_wr     = qp->rq.max;
	init_attr->cap.max_send_sge    = qp->sq.max_gs;
	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return &qp->ibqp;
}
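
/*
 * QP teardown likewise unmaps the send and receive doorbell records
 * for userspace QPs before freeing the QP; the container was
 * allocated in mthca_create_qp, so it is kfree()d here.
 */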
static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(qp->device),
				    &context->uar,
				    context->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &context->uar,
				    context->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}

static int mthca_create_cq(struct ib_cq *ibcq,
			   const struct ib_cq_init_attr *attr,
			   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);

	if (attr->flags)
		return -EINVAL;

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return -EINVAL;

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
					context->db_tab, ucmd.set_db_index,
					ucmd.set_db_page);
		if (err)
			return err;

		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
					context->db_tab, ucmd.arm_db_index,
					ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = to_mcq(ibcq);

	if (udata) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index  = ucmd.set_db_index;
		cq->arm_db_index     = ucmd.arm_db_index;
	}

	/* the CQ depth must be a power of two strictly greater than entries */
	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent, context,
			    udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_unmap_arm;

	if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;
		goto err_unmap_arm;
	}

	cq->resize_buf = NULL;

	return 0;

err_unmap_arm:
	if (udata)
		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
				    context->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (udata)
		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
				    context->db_tab, ucmd.set_db_index);

	return err;
}
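
/*
 * CQ resize is a two-step scheme: a new buffer is staged in
 * cq->resize_buf under cq->lock, the firmware is asked to switch via
 * mthca_RESIZE_CQ(), and whichever buffer (old or staged) is no
 * longer in use is freed afterwards.
 */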
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}

static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(cq->device),
				    &context->uar,
				    context->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &context->uar,
				    context->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
}
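
/*
 * IB access flags map one-to-one onto MPT flag bits;
 * MTHCA_MPT_FLAG_LOCAL_READ is set unconditionally since local read
 * access is always granted.
 */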
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}

static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct sg_dma_page_iter sg_iter;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int n, i;
	int err = 0;
	int write_mtt_size;

	if (udata->inlen < sizeof ucmd) {
		if (!context->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   current->comm);
			mthca_warn(dev, " Update libmthca to fix this.\n");
		}
		++context->reg_mr_warned;
		ucmd.mr_attrs = 0;
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->device, start, length, acc);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	n = ib_umem_num_pages(mr->umem);

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
		pages[i++] = sg_page_iter_dma_address(&sg_iter);

		/*
		 * Be friendly to write_mtt and pass it chunks
		 * of appropriate size.
		 */
		if (i == write_mtt_size) {
			err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
			if (err)
				goto mtt_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
			     convert_access(acc), mr);

	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}
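
/*
 * Deregistration frees the hardware MPT/MTT state before releasing
 * the pinned umem, so the HCA never holds a translation to pages
 * that have already been unpinned.
 */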
static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}

static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev);
	return err;
}

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sprintf(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);
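
/*
 * hw_rev, hca_type and board_id are exposed through sysfs; the group
 * is attached to the IB device in mthca_register_device() via
 * rdma_set_device_sysfs_group().
 */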
static struct attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group mthca_attr_group = {
	.attrs = mthca_dev_attributes,
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void get_dev_fw_str(struct ib_device *device, char *str)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
		 (int) (dev->fw_ver >> 32),
		 (int) (dev->fw_ver >> 16) & 0xffff,
		 (int) dev->fw_ver & 0xffff);
}
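
/*
 * Verb entry points common to all mthca HCAs. Tavor- and
 * Arbel-specific fast-path, SRQ and FMR handlers live in the smaller
 * ops tables below and are layered on in mthca_register_device().
 */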
static const struct ib_device_ops mthca_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MTHCA,
	.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION,
	.uverbs_no_driver_id_binding = 1,

	.alloc_pd = mthca_alloc_pd,
	.alloc_ucontext = mthca_alloc_ucontext,
	.attach_mcast = mthca_multicast_attach,
	.create_ah = mthca_ah_create,
	.create_cq = mthca_create_cq,
	.create_qp = mthca_create_qp,
	.dealloc_pd = mthca_dealloc_pd,
	.dealloc_ucontext = mthca_dealloc_ucontext,
	.dereg_mr = mthca_dereg_mr,
	.destroy_ah = mthca_ah_destroy,
	.destroy_cq = mthca_destroy_cq,
	.destroy_qp = mthca_destroy_qp,
	.detach_mcast = mthca_multicast_detach,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mthca_get_dma_mr,
	.get_port_immutable = mthca_port_immutable,
	.mmap = mthca_mmap_uar,
	.modify_device = mthca_modify_device,
	.modify_port = mthca_modify_port,
	.modify_qp = mthca_modify_qp,
	.poll_cq = mthca_poll_cq,
	.process_mad = mthca_process_mad,
	.query_ah = mthca_ah_query,
	.query_device = mthca_query_device,
	.query_gid = mthca_query_gid,
	.query_pkey = mthca_query_pkey,
	.query_port = mthca_query_port,
	.query_qp = mthca_query_qp,
	.reg_user_mr = mthca_reg_user_mr,
	.resize_cq = mthca_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
};

static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_arbel_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_tavor_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
	.alloc_fmr = mthca_alloc_fmr,
	.dealloc_fmr = mthca_dealloc_fmr,
	.map_phys_fmr = mthca_arbel_map_phys_fmr,
	.unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_tavor_fmr_ops = {
	.alloc_fmr = mthca_alloc_fmr,
	.dealloc_fmr = mthca_dealloc_fmr,
	.map_phys_fmr = mthca_tavor_map_phys_fmr,
	.unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_arbel_ops = {
	.post_recv = mthca_arbel_post_receive,
	.post_send = mthca_arbel_post_send,
	.req_notify_cq = mthca_arbel_arm_cq,
};

static const struct ib_device_ops mthca_dev_tavor_ops = {
	.post_recv = mthca_tavor_post_receive,
	.post_send = mthca_tavor_post_send,
	.req_notify_cq = mthca_tavor_arm_cq,
};
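
/*
 * Registration order matters: node data is read from firmware first,
 * the uverbs command mask and ops tables are chosen to match the
 * device's capabilities (SRQ, FMR, mem-free mode), and catastrophic
 * error polling is started only after ib_register_device() succeeds.
 */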
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

		if (mthca_is_memfree(dev))
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_arbel_srq_ops);
		else
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_tavor_srq_ops);
	}

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		if (mthca_is_memfree(dev))
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_arbel_fmr_ops);
		else
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_tavor_fmr_ops);
	}

	ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);

	if (mthca_is_memfree(dev))
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
	else
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);

	mutex_init(&dev->cap_mask_mutex);

	rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
	ret = ib_register_device(&dev->ib_dev, "mthca%d");
	if (ret)
		return ret;

	mthca_start_catas_poll(dev);

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}