/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include "vt.h"
#include "cq.h"
#include "trace.h"

#define RVT_UVERBS_ABI_VERSION 2

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RDMA Verbs Transport Library");

static int rvt_init(void)
{
	int ret = rvt_driver_cq_init();

	if (ret)
		pr_err("Error in driver CQ init.\n");

	return ret;
}
module_init(rvt_init);

static void rvt_cleanup(void)
{
	rvt_cq_exit();
}
module_exit(rvt_cleanup);

/**
 * rvt_alloc_device - allocate rdi
 * @size: how big of a structure to allocate
 * @nports: number of ports to allocate array slots for
 *
 * Use the IB core device alloc to allocate space for the rdi, which embeds
 * the ib_device. Any extra space that drivers require should be included
 * in size.
 *
 * We also allocate a port array based on the number of ports.
 *
 * Return: pointer to allocated rdi
 */
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
{
	struct rvt_dev_info *rdi;

	rdi = container_of(_ib_alloc_device(size), struct rvt_dev_info, ibdev);
	if (!rdi)
		return rdi;

	rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
	if (!rdi->ports)
		ib_dealloc_device(&rdi->ibdev);

	return rdi;
}
EXPORT_SYMBOL(rvt_alloc_device);

/**
 * rvt_dealloc_device - deallocate rdi
 * @rdi: structure to free
 *
 * Free a structure allocated with rvt_alloc_device()
 */
void rvt_dealloc_device(struct rvt_dev_info *rdi)
{
	kfree(rdi->ports);
	ib_dealloc_device(&rdi->ibdev);
}
EXPORT_SYMBOL(rvt_dealloc_device);
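/*
 * Illustrative sketch (hypothetical "my_ibdev"/"dd" names, not taken from
 * an in-tree driver): a driver wraps struct rvt_dev_info in its own device
 * structure, sizes the allocation for the wrapper, and recovers the wrapper
 * with container_of():
 *
 *	struct my_ibdev {
 *		struct rvt_dev_info rdi;	// must remain the first member
 *		...driver private state...
 *	};
 *
 *	rdi = rvt_alloc_device(sizeof(struct my_ibdev), nports);
 *	if (!rdi)
 *		return -ENOMEM;
 *	dd = container_of(rdi, struct my_ibdev, rdi);
 */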
static int rvt_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props,
			    struct ib_udata *uhw)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;
	/*
	 * Return rvt_dev_info.dparms.props contents
	 */
	*props = rdi->dparms.props;
	return 0;
}

static int rvt_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	/*
	 * There is currently no need to supply this based on qib and hfi1.
	 * Future drivers may need to implement this though.
	 */

	return -EOPNOTSUPP;
}

/**
 * rvt_query_port - Pass the query port call through to the driver
 * @ibdev: Verbs IB dev
 * @port_num: port number, 1 based from ib core
 * @props: structure to hold returned properties
 *
 * Return: 0 on success
 */
static int rvt_query_port(struct ib_device *ibdev, u32 port_num,
			  struct ib_port_attr *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	u32 port_index = ibport_num_to_idx(ibdev, port_num);

	rvp = rdi->ports[port_index];
	/* props is zeroed by the caller, so avoid zeroing it here */
	props->sm_lid = rvp->sm_lid;
	props->sm_sl = rvp->sm_sl;
	props->port_cap_flags = rvp->port_cap_flags;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = rvt_get_npkeys(rdi);
	props->bad_pkey_cntr = rvp->pkey_violations;
	props->qkey_viol_cntr = rvp->qkey_violations;
	props->subnet_timeout = rvp->subnet_timeout;
	props->init_type_reply = 0;

	/* Populate the remaining ib_port_attr elements */
	return rdi->driver_f.query_port_state(rdi, port_num, props);
}

/**
 * rvt_modify_port - modify port properties
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @port_modify_mask: How to change the port
 * @props: Structure describing the port modifications
 *
 * Return: 0 on success
 */
static int rvt_modify_port(struct ib_device *ibdev, u32 port_num,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	int ret = 0;
	u32 port_index = ibport_num_to_idx(ibdev, port_num);

	rvp = rdi->ports[port_index];
	if (port_modify_mask & IB_PORT_OPA_MASK_CHG) {
		rvp->port_cap3_flags |= props->set_port_cap_mask;
		rvp->port_cap3_flags &= ~props->clr_port_cap_mask;
	} else {
		rvp->port_cap_flags |= props->set_port_cap_mask;
		rvp->port_cap_flags &= ~props->clr_port_cap_mask;
	}

	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		rdi->driver_f.cap_mask_chg(rdi, port_num);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ret = rdi->driver_f.shut_down_port(rdi, port_num);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		rvp->qkey_violations = 0;

	return ret;
}

/**
 * rvt_query_pkey - Return a pkey from the table at a given index
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @index: Index into pkey table
 * @pkey: returned pkey from the port pkey table
 *
 * Return: 0 on success, -EINVAL if @index is out of range
 */
static int rvt_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index,
			  u16 *pkey)
{
	/*
	 * The driver is responsible for keeping rvt_dev_info.pkey_table up to
	 * date; this function just returns the stored value.  There is no
	 * need to lock: if a stale value is read and sent to the user, so be
	 * it, as there is no way to protect against that anyway.
	 */
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	u32 port_index;

	port_index = ibport_num_to_idx(ibdev, port_num);

	if (index >= rvt_get_npkeys(rdi))
		return -EINVAL;

	*pkey = rvt_get_pkey(rdi, port_index, index);
	return 0;
}

/**
 * rvt_query_gid - Return a gid from the table
 * @ibdev: Verbs IB dev
 * @port_num: Port number, 1 based from ib core
 * @guid_index: Index in table
 * @gid: Gid to return
 *
 * Return: 0 on success
 */
static int rvt_query_gid(struct ib_device *ibdev, u32 port_num,
			 int guid_index, union ib_gid *gid)
{
	struct rvt_dev_info *rdi;
	struct rvt_ibport *rvp;
	u32 port_index;

	/*
	 * The driver is responsible for updating the guid table, which is
	 * used to craft the return value here, much like query_pkey() above.
	 */
	port_index = ibport_num_to_idx(ibdev, port_num);

	rdi = ib_to_rvt(ibdev);
	rvp = rdi->ports[port_index];

	gid->global.subnet_prefix = rvp->gid_prefix;

	return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
					 &gid->global.interface_id);
}

/**
 * rvt_alloc_ucontext - Allocate a user context
 * @uctx: Verbs context
 * @udata: User data allocated
 *
 * Return: always 0
 */
static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	return 0;
}

/**
 * rvt_dealloc_ucontext - Free a user context
 * @context: Unused
 */
static void rvt_dealloc_ucontext(struct ib_ucontext *context)
{
	return;
}

static int rvt_get_port_immutable(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_immutable *immutable)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = rdi->dparms.core_cap_flags;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = rdi->dparms.max_mad_size;

	return 0;
}
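/*
 * Indices for the verbs (plus the MISC group of non-verb requirements)
 * that check_support() walks at registration time.  For each entry the
 * driver must either supply its own ib_device op or provide the driver_f
 * callbacks rdmavt needs in order to implement that verb on the driver's
 * behalf.
 */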
enum {
	MISC,
	QUERY_DEVICE,
	MODIFY_DEVICE,
	QUERY_PORT,
	MODIFY_PORT,
	QUERY_PKEY,
	QUERY_GID,
	ALLOC_UCONTEXT,
	DEALLOC_UCONTEXT,
	GET_PORT_IMMUTABLE,
	CREATE_QP,
	MODIFY_QP,
	DESTROY_QP,
	QUERY_QP,
	POST_SEND,
	POST_RECV,
	POST_SRQ_RECV,
	CREATE_AH,
	DESTROY_AH,
	MODIFY_AH,
	QUERY_AH,
	CREATE_SRQ,
	MODIFY_SRQ,
	DESTROY_SRQ,
	QUERY_SRQ,
	ATTACH_MCAST,
	DETACH_MCAST,
	GET_DMA_MR,
	REG_USER_MR,
	DEREG_MR,
	ALLOC_MR,
	MAP_MR_SG,
	ALLOC_FMR,
	MAP_PHYS_FMR,
	UNMAP_FMR,
	DEALLOC_FMR,
	MMAP,
	CREATE_CQ,
	DESTROY_CQ,
	POLL_CQ,
	REQ_NOTIFY_CQ,
	RESIZE_CQ,
	ALLOC_PD,
	DEALLOC_PD,
	_VERB_IDX_MAX /* Must always be last! */
};

static const struct ib_device_ops rvt_dev_ops = {
	.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION,

	.alloc_mr = rvt_alloc_mr,
	.alloc_pd = rvt_alloc_pd,
	.alloc_ucontext = rvt_alloc_ucontext,
	.attach_mcast = rvt_attach_mcast,
	.create_ah = rvt_create_ah,
	.create_cq = rvt_create_cq,
	.create_qp = rvt_create_qp,
	.create_srq = rvt_create_srq,
	.create_user_ah = rvt_create_ah,
	.dealloc_pd = rvt_dealloc_pd,
	.dealloc_ucontext = rvt_dealloc_ucontext,
	.dereg_mr = rvt_dereg_mr,
	.destroy_ah = rvt_destroy_ah,
	.destroy_cq = rvt_destroy_cq,
	.destroy_qp = rvt_destroy_qp,
	.destroy_srq = rvt_destroy_srq,
	.detach_mcast = rvt_detach_mcast,
	.get_dma_mr = rvt_get_dma_mr,
	.get_port_immutable = rvt_get_port_immutable,
	.map_mr_sg = rvt_map_mr_sg,
	.mmap = rvt_mmap,
	.modify_ah = rvt_modify_ah,
	.modify_device = rvt_modify_device,
	.modify_port = rvt_modify_port,
	.modify_qp = rvt_modify_qp,
	.modify_srq = rvt_modify_srq,
	.poll_cq = rvt_poll_cq,
	.post_recv = rvt_post_recv,
	.post_send = rvt_post_send,
	.post_srq_recv = rvt_post_srq_recv,
	.query_ah = rvt_query_ah,
	.query_device = rvt_query_device,
	.query_gid = rvt_query_gid,
	.query_pkey = rvt_query_pkey,
	.query_port = rvt_query_port,
	.query_qp = rvt_query_qp,
	.query_srq = rvt_query_srq,
	.reg_user_mr = rvt_reg_user_mr,
	.req_notify_cq = rvt_req_notify_cq,
	.resize_cq = rvt_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
};

static noinline int check_support(struct rvt_dev_info *rdi, int verb)
{
	switch (verb) {
	case MISC:
		/*
		 * These functions are not part of verbs specifically but are
		 * required for rdmavt to function.
		 */
		if ((!rdi->ibdev.ops.init_port) ||
		    (!rdi->driver_f.get_pci_dev))
			return -EINVAL;
		break;

	case MODIFY_DEVICE:
		/*
		 * rdmavt does not currently support modify_device; drivers
		 * must provide it themselves.
		 */
		if (!rdi->ibdev.ops.modify_device)
			return -EOPNOTSUPP;
		break;

	case QUERY_PORT:
		if (!rdi->ibdev.ops.query_port)
			if (!rdi->driver_f.query_port_state)
				return -EINVAL;
		break;

	case MODIFY_PORT:
		if (!rdi->ibdev.ops.modify_port)
			if (!rdi->driver_f.cap_mask_chg ||
			    !rdi->driver_f.shut_down_port)
				return -EINVAL;
		break;

	case QUERY_GID:
		if (!rdi->ibdev.ops.query_gid)
			if (!rdi->driver_f.get_guid_be)
				return -EINVAL;
		break;

	case CREATE_QP:
		if (!rdi->ibdev.ops.create_qp)
			if (!rdi->driver_f.qp_priv_alloc ||
			    !rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case MODIFY_QP:
		if (!rdi->ibdev.ops.modify_qp)
			if (!rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.schedule_send ||
			    !rdi->driver_f.get_pmtu_from_attr ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp ||
			    !rdi->driver_f.notify_error_qp ||
			    !rdi->driver_f.mtu_from_qp ||
			    !rdi->driver_f.mtu_to_path_mtu)
				return -EINVAL;
		break;

	case DESTROY_QP:
		if (!rdi->ibdev.ops.destroy_qp)
			if (!rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case POST_SEND:
		if (!rdi->ibdev.ops.post_send)
			if (!rdi->driver_f.schedule_send ||
			    !rdi->driver_f.do_send ||
			    !rdi->post_parms)
				return -EINVAL;
		break;

	}

	return 0;
}

/**
 * rvt_register_device - register a driver
 * @rdi: main dev structure for all of rdmavt operations
 *
 * It is up to drivers to allocate the rdi and fill in the appropriate
 * information.
 *
 * Return: 0 on success otherwise an errno.
 */
int rvt_register_device(struct rvt_dev_info *rdi)
{
	int ret = 0, i;

	if (!rdi)
		return -EINVAL;

	/*
	 * Check to ensure drivers have set up the required helpers for the
	 * verbs they want rdmavt to handle
	 */
	for (i = 0; i < _VERB_IDX_MAX; i++)
		if (check_support(rdi, i)) {
			pr_err("Driver support req not met at %d\n", i);
			return -EINVAL;
		}

	ib_set_device_ops(&rdi->ibdev, &rvt_dev_ops);

	/* Once we get past here we can use rvt_pr macros and tracepoints */
	trace_rvt_dbg(rdi, "Driver attempting registration");
	rvt_mmap_init(rdi);

	/* Queue Pairs */
	ret = rvt_driver_qp_init(rdi);
	if (ret) {
		pr_err("Error in driver QP init.\n");
		return -EINVAL;
	}

	/* Address Handle */
	spin_lock_init(&rdi->n_ahs_lock);
	rdi->n_ahs_allocated = 0;

	/* Shared Receive Queue */
	rvt_driver_srq_init(rdi);

	/* Multicast */
	rvt_driver_mcast_init(rdi);

	/* Mem Region */
	ret = rvt_driver_mr_init(rdi);
	if (ret) {
		pr_err("Error in driver MR init.\n");
		goto bail_no_mr;
	}

	/* Memory Working Set Size */
	ret = rvt_wss_init(rdi);
	if (ret) {
		rvt_pr_err(rdi, "Error in WSS init.\n");
		goto bail_mr;
	}

	/* Completion queues */
	spin_lock_init(&rdi->n_cqs_lock);

	/* Protection Domain */
	spin_lock_init(&rdi->n_pds_lock);
	rdi->n_pds_allocated = 0;

	/*
	 * There are some things which could be set by underlying drivers but
	 * really should be up to rdmavt to set. For instance, drivers can't
	 * know exactly which functions rdmavt supports, nor do they know the
	 * ABI version, so we do all of this sort of stuff here.
	 */
	rdi->ibdev.uverbs_cmd_mask |=
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	rdi->ibdev.node_type = RDMA_NODE_IB_CA;
	if (!rdi->ibdev.num_comp_vectors)
		rdi->ibdev.num_comp_vectors = 1;

	/* We are now good to announce we exist */
	ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev), NULL);
	if (ret) {
		rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
		goto bail_wss;
	}

	rvt_create_mad_agents(rdi);

	rvt_pr_info(rdi, "Registration with rdmavt done.\n");
	return ret;

bail_wss:
	rvt_wss_exit(rdi);
bail_mr:
	rvt_mr_exit(rdi);

bail_no_mr:
	rvt_qp_exit(rdi);

	return ret;
}
EXPORT_SYMBOL(rvt_register_device);

/**
 * rvt_unregister_device - remove a driver
 * @rdi: rvt dev struct
 */
void rvt_unregister_device(struct rvt_dev_info *rdi)
{
	if (!rdi)
		return;

	trace_rvt_dbg(rdi, "Driver is unregistering.");

	rvt_free_mad_agents(rdi);

	ib_unregister_device(&rdi->ibdev);
	rvt_wss_exit(rdi);
	rvt_mr_exit(rdi);
	rvt_qp_exit(rdi);
}
EXPORT_SYMBOL(rvt_unregister_device);

/**
 * rvt_init_port - init internal data for driver port
 * @rdi: rvt_dev_info struct
 * @port: rvt port
 * @port_index: 0 based index of ports, different from IB core port num
 * @pkey_table: pkey_table for @port
 *
 * Keep track of a list of ports. No need to have a detach port.
 * They persist until the driver goes away.
 *
 * Return: always 0
 */
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table)
{
	rdi->ports[port_index] = port;
	rdi->ports[port_index]->pkey_table = pkey_table;

	return 0;
}
EXPORT_SYMBOL(rvt_init_port);
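/*
 * Illustrative registration sketch (hypothetical "my_*"/"dd" names, not a
 * complete list of callbacks): after allocating the rdi, a driver fills in
 * dparms and the driver_f callbacks that check_support() looks for, wires
 * up each port with rvt_init_port(), and then registers:
 *
 *	rdi->driver_f.get_pci_dev = my_get_pci_dev;
 *	rdi->driver_f.query_port_state = my_query_port_state;
 *	rdi->driver_f.cap_mask_chg = my_cap_mask_chg;
 *	rdi->driver_f.shut_down_port = my_shut_down_port;
 *	rdi->driver_f.get_guid_be = my_get_guid_be;
 *	...
 *	for (i = 0; i < nports; i++)
 *		rvt_init_port(rdi, &dd->ports[i].ibport_data, i,
 *			      dd->ports[i].pkeys);
 *
 *	ret = rvt_register_device(rdi);
 */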