/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN	100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT	2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX	200000
#define IB_SA_CPI_MAX_RETRY_CNT		3
#define IB_SA_CPI_RETRY_WAIT		1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t classport_lock; /* protects class port info set */
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list; /* Local svc request list */
	u32 seq; /* Local svc request sequence number */
	unsigned long timeout; /* Local svc timeout */
	u8 path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};


static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field), \
	.struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
	.field_name = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits = 1,
	  .size_bits = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits = 0,
	  .size_bits = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits = 12,
	  .size_bits = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits = 2,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 16,
	  .size_bits = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes = \
		sizeof((struct sa_path_rec *)0)->field, \
	.field_name = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits = 1,
	  .size_bits = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits = 9,
	  .size_bits = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits = 1,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits = 2,
	  .size_bits = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits = 3,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 4,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits = 6,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 16,
	  .size_bits = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits = 19,
	  .size_bits = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits = 2,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits = 8,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits = 10,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits = 24,
	  .size_bits = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
	.struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
	.field_name = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits = 4,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
	.struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
	.field_name = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits = 0,
	  .size_bits = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits = 0,
	  .size_bits = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits = 0,
	  .size_bits = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits = 0,
	  .size_bits = 2*64 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
	.struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
	.field_name = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits = 16,
	  .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits = 16,
	  .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits = 0,
	  .size_bits = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field), \
	.struct_size_bytes = \
		sizeof((struct opa_class_port_info *)0)->field, \
	.field_name = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits = 8,
	  .size_bits = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
	.struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
	.field_name = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first.*/
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

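/*
 * Illustrative sketch (hypothetical consumer code, for reference only):
 * a consumer typically brackets its SA usage with ib_sa_register_client()
 * and ib_sa_unregister_client(), and may cancel an outstanding query with
 * the (id, query) pair returned by the query call.  The names my_client,
 * my_query and my_query_id are illustrative, not part of this API.
 *
 *	static struct ib_sa_client my_client;
 *	static struct ib_sa_query *my_query;
 *	static int my_query_id = -1;
 *
 *	static void my_sa_start(void)
 *	{
 *		ib_sa_register_client(&my_client);
 *		// issue a query here, e.g. ib_sa_path_rec_get(), which
 *		// returns my_query_id and fills in my_query
 *	}
 *
 *	static void my_sa_stop(void)
 *	{
 *		if (my_query_id >= 0)
 *			ib_sa_cancel_query(my_query_id, my_query);
 *		ib_sa_unregister_client(&my_client);
 *	}
 */
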
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

static int roce_resolve_route_from_path(struct sa_path_rec *rec,
					const struct ib_gid_attr *attr)
{
	struct rdma_dev_addr dev_addr = {};
	union {
		struct sockaddr _sockaddr;
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int ret;

	if (rec->roce.route_resolved)
		return 0;
	if (!attr || !attr->ndev)
		return -EINVAL;

	dev_addr.bound_dev_if = attr->ndev->ifindex;
	/* TODO: Use net from the ib_gid_attr once it is added to it,
	 * until then, limit it to init_net.
	 */
	dev_addr.net = &init_net;

	rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
	rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

	/* validate the route */
	ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
				    &dgid_addr._sockaddr, &dev_addr);
	if (ret)
		return ret;

	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
	     dev_addr.network == RDMA_NETWORK_IPV6) &&
	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
		return -EINVAL;

	rec->roce.route_resolved = true;
	return 0;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: device the address handle attributes are initialized for.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @gid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer, ah_attr optionally holds a reference to the
 *     SGID attribute when a GRH is present;
 * (b) for the RoCE link layer, ah_attr holds a reference to the SGID
 *     attribute.
 * The user must invoke rdma_destroy_ah_attr() to release the reference to
 * SGID attributes which are initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);

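/*
 * Illustrative sketch (hypothetical caller, for reference only): a consumer
 * that has resolved a path record would typically turn it into an address
 * handle roughly as follows; my_create_ah_from_path() is illustrative and
 * error handling is abbreviated.
 *
 *	static struct ib_ah *my_create_ah_from_path(struct ib_pd *pd,
 *						    u8 port_num,
 *						    struct sa_path_rec *rec)
 *	{
 *		struct rdma_ah_attr ah_attr;
 *		struct ib_ah *ah;
 *		int ret;
 *
 *		ret = ib_init_ah_attr_from_path(pd->device, port_num, rec,
 *						&ah_attr, NULL);
 *		if (ret)
 *			return ERR_PTR(ret);
 *
 *		ah = rdma_create_ah(pd, &ah_attr);
 *		rdma_destroy_ah_attr(&ah_attr);  // drop the SGID reference
 *		return ah;
 *	}
 */
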
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check if sm_ah has valid dlid assigned,
	 * before querying for class port info
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

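/*
 * Illustrative sketch (hypothetical snippet, for reference only):
 * ib_sa_unpack_path() and ib_sa_pack_path() convert between the host
 * struct sa_path_rec and the wire PathRecord layout described by
 * path_rec_table above, so a wire image can be round-tripped like this
 * (wire_buf is an illustrative IB_MGMT_SA_DATA sized buffer):
 *
 *	struct sa_path_rec rec;
 *	u8 wire_buf[IB_MGMT_SA_DATA];
 *
 *	ib_sa_unpack_path(wire_buf, &rec);	// wire -> host struct
 *	rec.sl = 3;				// adjust a field
 *	ib_sa_pack_path(&rec, wire_buf);	// host struct -> wire
 */
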
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_device *device,
					 u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/**
 * Check if current PR query can be an OPA query.
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_device *device,
				 u8 port_num,
				 struct sa_path_rec *rec)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, device, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct sa_path_rec rec;

		if (sa_query->flags & IB_SA_QUERY_OPA) {
			ib_unpack(opa_path_rec_table,
				  ARRAY_SIZE(opa_path_rec_table),
				  mad->data, &rec);
			rec.rec_type = SA_PATH_REC_TYPE_OPA;
			query->callback(status, &rec, query->context);
		} else {
			ib_unpack(path_rec_table,
				  ARRAY_SIZE(path_rec_table),
				  mad->data, &rec);
			rec.rec_type = SA_PATH_REC_TYPE_IB;
			sa_path_set_dmac_zero(&rec);

			if (query->conv_pr) {
				struct sa_path_rec opa;

				memset(&opa, 0, sizeof(struct sa_path_rec));
				sa_convert_path_ib_to_opa(&opa, &rec);
				query->callback(status, &opa, query->context);
			} else {
				query->callback(status, &rec, query->context);
			}
		}
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, device, port_num, rec);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release = ib_sa_path_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

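/*
 * Illustrative sketch (hypothetical caller, for reference only): a minimal
 * use of ib_sa_path_rec_get(), querying by SGID/DGID and completing in a
 * callback.  my_path_handler(), my_resolve() and my_client are illustrative
 * names; my_client must have been set up with ib_sa_register_client().
 *
 *	static void my_path_handler(int status, struct sa_path_rec *resp,
 *				    void *context)
 *	{
 *		if (!status)
 *			pr_info("path resolved, sl %d\n", resp->sl);
 *	}
 *
 *	static int my_resolve(struct ib_device *dev, u8 port_num,
 *			      union ib_gid *sgid, union ib_gid *dgid)
 *	{
 *		struct sa_path_rec rec = {};
 *		struct ib_sa_query *query;
 *
 *		rec.rec_type = SA_PATH_REC_TYPE_IB;
 *		rec.sgid = *sgid;
 *		rec.dgid = *dgid;
 *		rec.numb_path = 1;
 *
 *		return ib_sa_path_rec_get(&my_client, dev, port_num, &rec,
 *					  IB_SA_PATH_REC_SGID |
 *					  IB_SA_PATH_REC_DGID |
 *					  IB_SA_PATH_REC_NUMB_PATH,
 *					  1000, GFP_KERNEL,
 *					  my_path_handler, NULL, &query);
 *	}
 */
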
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release = ib_sa_service_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
				    struct ib_device *device,
				    u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	bool ret = false;
	unsigned long flags;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];

	spin_lock_irqsave(&port->classport_lock, flags);
	if ((port->classport_info.valid) &&
	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
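/*
 * Illustrative sketch (not part of the original file): a consumer (e.g. an
 * in-tree alias-GUID implementation) drives GuidInfoRecord queries through
 * the exported ib_sa_guid_info_rec_query(). The fragment below is a
 * hypothetical, minimal GET of GUID block 0; the client, callback and
 * timeout are assumptions. By comparison, ib_sa_sendonly_fullmem_support()
 * above is purely synchronous: it only reads the cached ClassPortInfo under
 * classport_lock and sends nothing on the wire.
 *
 *	static void example_guid_cb(int status, struct ib_sa_guidinfo_rec *resp,
 *				    void *context)
 *	{
 *		// resp is valid only when status == 0
 *	}
 *
 *	static int example_get_guid_block(struct ib_sa_client *client,
 *					  struct ib_device *dev, u8 port,
 *					  struct ib_sa_query **query)
 *	{
 *		struct ib_sa_guidinfo_rec rec = {};
 *
 *		rec.block_num = 0;
 *		return ib_sa_guid_info_rec_query(client, dev, port, &rec,
 *						 IB_SA_GUIDINFO_REC_BLOCK_NUM,
 *						 IB_MGMT_METHOD_GET,
 *						 2000, GFP_KERNEL,
 *						 example_guid_cb, NULL, query);
 *	}
 */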
struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  int timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release  = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask	 = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}
static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query.
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately.
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u8 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}