/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void		 *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock.  A special case is when the
 * device_mutex is locked.  In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

struct ib_device *__ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (device->index == index)
			return device;

	return NULL;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
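
/*
 * Illustrative note (not part of the original file): a low-level driver
 * that wants an automatically numbered device name sets a printf-style
 * template before registration; alloc_name() above then substitutes the
 * first free instance number.  "mydrv" is a hypothetical driver name.
 *
 *	strlcpy(device->name, "mydrv%d", IB_DEVICE_NAME_MAX);
 *	// after ib_register_device(): "mydrv0", "mydrv1", ...
 */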
static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In the IB_DEV_UNINITIALIZED state the cache and port
		 * table have not even been created, so free them only
		 * once the device has reached the UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
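
/*
 * Illustrative sketch (not part of the original file): low-level drivers
 * embed struct ib_device at the start of their private device structure
 * and size the allocation accordingly.  "mydrv_dev" is a hypothetical
 * driver type; the ib_device member must come first for the cast to be
 * valid.
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;		// must be the first member
 *		void __iomem	*regs;		// driver-private state follows
 *	};
 *
 *	struct mydrv_dev *dev =
 *		(struct mydrv_dev *)ib_alloc_device(sizeof(*dev));
 *	if (!dev)
 *		return -ENOMEM;
 *	...
 *	ib_dealloc_device(&dev->ibdev);		// on error or teardown
 */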
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number to
	 * make access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}
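
/*
 * Illustrative sketch (not part of the original file): a typical
 * get_port_immutable() callback, modeled on existing IB HCA drivers;
 * "mydrv_port_immutable" is a hypothetical name.  The driver fills in the
 * per-port table sizes and capability flags that read_port_immutable()
 * caches above.
 *
 *	static int mydrv_port_immutable(struct ib_device *ibdev, u8 port_num,
 *					struct ib_port_immutable *immutable)
 *	{
 *		struct ib_port_attr attr;
 *		int err;
 *
 *		err = ib_query_port(ibdev, port_num, &attr);
 *		if (err)
 *			return err;
 *
 *		immutable->pkey_tbl_len = attr.pkey_tbl_len;
 *		immutable->gid_tbl_len = attr.gid_tbl_len;
 *		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
 *		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 *		return 0;
 *	}
 */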
void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/*
	 * device->port_pkey_list is indexed directly by the port number,
	 * therefore it is declared as a 1-based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		int i;

		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&lists_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);

	return NOTIFY_OK;
}

/**
 * __dev_new_index - allocate a device index
 *
 * Returns a suitable unique value for a new device interface
 * number.  It assumes that fewer than 2^32-1 IB devices will be
 * present in the system.
 */
static u32 __dev_new_index(void)
{
	/*
	 * The device index to allow stable naming.
	 * Similar to struct net -> ifindex.
	 */
	static u32 index;

	for (;;) {
		if (!(++index))
			index = 1;

		if (!__ib_device_get_by_index(index))
			return index;
	}
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(!parent);
	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask)
			device->dev.dma_mask = parent->dma_mask;
		if (!device->dev.coherent_dma_mask)
			device->dev.coherent_dma_mask =
				parent->coherent_dma_mask;
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		device->dma_device = parent;
	}

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per-port immutable data for %s\n",
			device->name);
		goto out;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		pr_warn("Couldn't create per-port pkey list\n");
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto port_cleanup;
	}

	ret = ib_device_register_rdmacg(device);
	if (ret) {
		pr_warn("Couldn't register device with rdma cgroup\n");
		goto cache_cleanup;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		goto cache_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		goto cache_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	device->index = __dev_new_index();
	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cache_cleanup:
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
port_cleanup:
	kfree(device->port_immutable);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
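
/*
 * Illustrative sketch (not part of the original file): registration from a
 * hypothetical driver probe path.  A NULL port_callback is the common case
 * for drivers that add no extra per-port sysfs attributes.
 *
 *	strlcpy(dev->ibdev.name, "mydrv%d", IB_DEVICE_NAME_MAX);
 *	dev->ibdev.query_device	       = mydrv_query_device;
 *	// ... set the remaining methods checked by
 *	// ib_device_check_mandatory() ...
 *	dev->ibdev.get_port_immutable  = mydrv_port_immutable;
 *
 *	err = ib_register_device(&dev->ibdev, NULL);
 *	if (err)
 *		goto err_dealloc;	// ib_dealloc_device(&dev->ibdev)
 */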
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	ib_device_unregister_rdmacg(device);
	ib_device_unregister_sysfs(device);

	mutex_unlock(&device_mutex);

	ib_cache_cleanup_one(device);

	ib_security_destroy_port_pkey_list(device);
	kfree(device->port_pkey_list);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
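
/*
 * Illustrative sketch (not part of the original file): a minimal IB client.
 * add() runs for every device already registered and for each device
 * registered later; remove() runs on device or client unregistration.
 * All "myclient" names are hypothetical.
 *
 *	static void myclient_add_one(struct ib_device *device);
 *	static void myclient_remove_one(struct ib_device *device,
 *					void *client_data);
 *
 *	static struct ib_client myclient = {
 *		.name	= "myclient",
 *		.add	= myclient_add_one,
 *		.remove	= myclient_remove_one,
 *	};
 *
 *	ret = ib_register_client(&myclient);	// from module init
 *	...
 *	ib_unregister_client(&myclient);	// from module exit
 */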
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns the client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
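
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * the two calls above inside a client's add()/remove() callbacks
 * (hypothetical names).  The remove() callback receives the same pointer
 * that was stored with ib_set_client_data().
 *
 *	static void myclient_add_one(struct ib_device *device)
 *	{
 *		struct myclient_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return;
 *		ib_set_client_data(device, &myclient, ctx);
 *	}
 *
 *	static void myclient_remove_one(struct ib_device *device,
 *					void *client_data)
 *	{
 *		kfree(client_data);	// same pointer set above
 *	}
 */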
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
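
/*
 * Illustrative sketch (not part of the original file): consuming
 * asynchronous events with the calls above.  INIT_IB_EVENT_HANDLER() is the
 * initializer provided by <rdma/ib_verbs.h>; since the handler may run in
 * interrupt context it must not sleep.  Names are hypothetical.
 *
 *	static void myclient_event(struct ib_event_handler *handler,
 *				   struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			;	// e.g. schedule_work() for the real work
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&ctx->event_handler, device, myclient_event);
 *	ib_register_event_handler(&ctx->event_handler);
 *	...
 *	ib_unregister_event_handler(&ctx->event_handler);
 */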
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev which are
 * related to a netdevice, and calls the callback on each port for
 * which the filter function returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices which are related
 * to netdevices, and calls the callback on each port for which the
 * filter function returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @nldev_cb: Callback to call for each found ib_device
 * @skb: Socket buffer passed through to the callback
 * @cb: Netlink callback state passed through to the callback
 *
 * Enumerates all ib_devices and calls the callback on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}

	up_read(&lists_rwsem);
	return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
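
/*
 * Illustrative sketch (not part of the original file): querying port
 * attributes and the default P_Key with the helpers above.
 *
 *	struct ib_port_attr attr;
 *	u16 pkey;
 *
 *	if (!ib_query_port(device, port, &attr) &&
 *	    !ib_query_pkey(device, port, 0, &pkey))
 *		pr_debug("port %u: state %d, pkey[0] 0x%04x\n",
 *			 port, attr.state, pkey);
 */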
/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->modify_port)
		rc = device->modify_port(device, port_num, port_modify_mask,
					 port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, struct net_device *ndev,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		if (gid_type != IB_GID_TYPE_IB)
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
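
/*
 * Illustrative sketch (not part of the original file): resolving a GID back
 * to the port and table index where it resides.
 *
 *	u8 port;
 *	u16 index;
 *
 *	if (!ib_find_gid(device, &gid, IB_GID_TYPE_IB, NULL, &port, &index))
 *		pr_debug("gid found at port %u index %u\n", port, index);
 */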
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member pkey found; take the limited member if one exists */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
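
/*
 * Worked example for the membership logic above: P_Keys 0xFFFF (full
 * member) and 0x7FFF (limited member) share the same low 15 bits, so a
 * search for either value matches both table entries; if both are present,
 * the full-member entry (bit 15 set) is preferred over the limited one.
 */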
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 *   for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The P_Key the request came in on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	ib_cache_setup();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

module_init(ib_core_init);
module_exit(ib_core_cleanup);