/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef CONFIG_SECURITY_INFINIBAND

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
	list_for_each_entry(tmp_pkey,
			    &dev->port_pkey_list[pp->port_num].pkey_list,
			    pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
	return pkey;
}

static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}

static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}
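/* A summary of the ib_port_pkey state machine as used in this file,
 * derived from the insert/remove helpers below:
 *
 *   IB_PORT_PKEY_NOT_VALID - no port/pkey setting exists for this slot;
 *                            it is skipped by all checks.
 *   IB_PORT_PKEY_VALID     - a port/pkey pair is set, but the entry is
 *                            not linked on any pkey_index_qp_list.
 *   IB_PORT_PKEY_LISTED    - the entry is linked on the per-device,
 *                            per-port pkey_index_qp_list so that cache
 *                            changes can revalidate the QP.
 *
 * port_pkey_list_insert() moves VALID to LISTED, and
 * port_pkey_list_remove() moves LISTED back to VALID.
 */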
/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}

static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u8 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}
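/* Lock ordering in this file, outermost first, as implemented by the
 * helpers above and below:
 *
 *   sec->mutex                      - QP security state and ports_pkeys
 *   port_pkey_list[port].list_lock  - the per-port list of pkey entries
 *   pkey->qp_list_lock              - the per-pkey list of QPs
 *
 * The two spinlocks are taken under sec->mutex but are never nested
 * inside each other.  check_pkey_qps() above runs without the mutex:
 * it collects offending QPs on a private to_error_list under
 * qp_list_lock, and only takes each QP's mutex after dropping the
 * spinlock, so a mutex is never acquired in atomic context.
 */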
/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u8 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_pkey_list[port_num].list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_pkey_list[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_pkey_list[port_num].pkey_list);
		}
		spin_unlock(&dev->port_pkey_list[port_num].list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* The setting may still be valid, e.g. after
	 * a destroy has failed.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}

static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}
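/* How get_new_pps() below resolves the new main settings, summarized
 * from the code:
 *
 *   IB_QP_PORT in qp_attr_mask        -> port_num taken from qp_attr
 *   IB_QP_PKEY_INDEX in qp_attr_mask  -> pkey_index taken from qp_attr
 *   neither bit set, qp_pps cached    -> both values carried over from
 *                                        qp_pps and revalidated
 *
 * If either bit is set but there are no cached settings, both values
 * come from qp_attr.  The alternate path is resolved the same way
 * from IB_QP_ALT_PATH.
 */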
/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
		if (!qp_pps) {
			new_pps->main.port_num = qp_attr->port_num;
			new_pps->main.pkey_index = qp_attr->pkey_index;
		} else {
			new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
						  qp_attr->port_num :
						  qp_pps->main.port_num;

			new_pps->main.pkey_index =
					(qp_attr_mask & IB_QP_PKEY_INDEX) ?
					 qp_attr->pkey_index :
					 qp_pps->main.pkey_index;
		}
		new_pps->main.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->main.port_num = qp_pps->main.port_num;
		new_pps->main.pkey_index = qp_pps->main.pkey_index;
		if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->main.state = IB_PORT_PKEY_VALID;
	}

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}

int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);

	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	u8 i = rdma_start_port(dev);
	bool is_ib = false;
	int ret;

	while (i <= rdma_end_port(dev) && !is_ib)
		is_ib = rdma_protocol_ib(dev, i++);

	/* If this isn't an IB device, don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);
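/* The three ib_destroy_qp_security_*() helpers below implement a
 * two-phase destroy.  A sketch of the intended caller flow, assuming
 * a core destroy path shaped roughly like this:
 *
 *	ib_destroy_qp_security_begin(sec);
 *	ret = qp->device->destroy_qp(qp);
 *	if (ret)
 *		ib_destroy_qp_security_abort(sec);	- QP lives on
 *	else
 *		ib_destroy_qp_security_end(sec);	- QP gone; free sec
 */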
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already on one or more of those lists,
	 * the destroying flag will ensure the to-error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these settings were already listed and
	 * then removed during ib_destroy_qp_security_begin,
	 * we know the pkey_index_qp_list for the PKey
	 * already exists, so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring we must
	 * wait until this QP security structure is processed
	 * in the QP to-error flow before destroying it because
	 * the to_error_list is in use.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}

void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry(pkey,
			    &device->port_pkey_list[port_num].pkey_list,
			    pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}

void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		spin_lock(&device->port_pkey_list[i].list_lock);
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_pkey_list[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
		spin_unlock(&device->port_pkey_list[i].list_lock);
	}
}
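/* ib_security_modify_qp() below wraps the driver's modify_qp hook:
 * new port/pkey settings are listed and permission-checked before the
 * hardware is touched, and whichever ports_pkeys structure loses (the
 * new one on failure, the old one on success) is unlisted and freed
 * afterwards.
 */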
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %d\n",
		  __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP.
	 * Open handles on the real QP will be in the shared_qp_list.
	 * When enforcing security on the real QP, all the shared QPs
	 * will be checked as well.
	 */

	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}
		/* Add this QP to the lists for the new port
		 * and pkey settings before checking for permission,
		 * in case a concurrent cache update is occurring.
		 * Walking the list for a cache change doesn't acquire
		 * the security mutex unless it's sending the QP to
		 * error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->modify_qp(real_qp,
						 qp_attr,
						 qp_attr_mask,
						 udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}
EXPORT_SYMBOL(ib_security_modify_qp);

int ib_security_pkey_access(struct ib_device *dev,
			    u8 port_num,
			    u16 pkey_index,
			    void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

	if (ret)
		return ret;

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(ib_security_pkey_access);

static int ib_mad_agent_security_change(struct notifier_block *nb,
					unsigned long event,
					void *data)
{
	struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
							     ag->device->name,
							     ag->port_num);

	return NOTIFY_OK;
}

int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	ret = security_ib_endport_manage_subnet(agent->security,
						agent->device->name,
						agent->port_num);
	if (ret)
		return ret;

	agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
	ret = register_lsm_notifier(&agent->lsm_nb);
	if (ret)
		return ret;

	agent->smp_allowed = true;
	agent->lsm_nb_reg = true;
	return 0;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	security_ib_free_security(agent->security);
	if (agent->lsm_nb_reg)
		unregister_lsm_notifier(&agent->lsm_nb);
}
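/* MAD traffic is checked in two ways: SMI agents are gated by the
 * cached smp_allowed flag, which the LSM policy-change notifier above
 * keeps current, while all other agents are checked per pkey_index
 * through ib_security_pkey_access().
 */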
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!map->agent.smp_allowed)
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}

#endif /* CONFIG_SECURITY_INFINIBAND */