/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000

static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list;		/* Local svc request list */
	u32 seq;			/* Local svc request sequence number */
	unsigned long timeout;		/* Local svc timeout */
	u8 path_use;			/* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
				     .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64},
	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
			      .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID] = {.type = NLA_BINARY,
			      .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS] = {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY] = {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \
	.struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \
	.field_name = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits = 1,
	  .size_bits = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits = 16,
	  .size_bits = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits = 0,
	  .size_bits = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits = 12,
	  .size_bits = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits = 0,
	  .size_bits = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits = 2,
	  .size_bits = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits = 8,
	  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits = 16,
	  .size_bits = 48 },
};
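
/*
 * Illustrative sketch, not part of the original driver: path_rec_table
 * drives the generic marshalling helpers from <rdma/ib_pack.h>, mapping
 * host-order struct fields onto the big-endian on-the-wire SA attribute
 * layout. Packing a filled-in struct ib_sa_path_rec looks roughly like
 * this (it is what the exported ib_sa_pack_path() wrapper further down
 * does); the name demo_pack_path_rec is hypothetical.
 */
static __maybe_unused void demo_pack_path_rec(struct ib_sa_path_rec *rec,
					      void *wire_buf)
{
	/* Marshal *rec into the bit layout described by path_rec_table. */
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, wire_buf);
}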

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
	.struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
	.field_name = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits = 16,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits = 18,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits = 24,
	  .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits = 26,
	  .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits = 4,
	  .size_bits = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits = 4,
	  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits = 8,
	  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits = 9,
	  .size_bits = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
	.struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
	.field_name = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits = 0,
	  .size_bits = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits = 0,
	  .size_bits = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits = 0,
	  .size_bits = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits = 0,
	  .size_bits = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits = 0,
	  .size_bits = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits = 0,
	  .size_bits = 2*64 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
	.struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
	.field_name = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits = 0,
	  .size_bits = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits = 16,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits = 24,
	  .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits = 0,
	  .size_bits = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits = 0,
	  .size_bits = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}
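
/*
 * Summary of the local service request lifecycle implemented above and
 * below (descriptive note, not in the original file): ib_nl_make_request()
 * stamps each query with a deadline and appends it to ib_nl_request_list;
 * a single delayed work item, ib_nl_timed_work, is armed for the earliest
 * deadline. A netlink response (ib_nl_handle_resolve_resp) removes the
 * query from the list. Cancellation (ib_nl_cancel_request) expedites the
 * timeout work, which completes a cancelled query with IB_WC_WR_FLUSH_ERR;
 * a genuine expiry instead falls back to sending the query as a real SA
 * MAD via ib_post_send_mad().
 */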

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

static int ib_nl_handle_set_timeout(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}

static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static struct ibnl_client_cbs ib_sa_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = port_attr.sm_lid;
	ah_attr.sl = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;
	if (port_attr.grh_required) {
		ah_attr.ah_flags = IB_AH_GRH;
		ah_attr.grh.dgid.global.subnet_prefix =
			cpu_to_be64(port_attr.subnet_prefix);
		ah_attr.grh.dgid.global.interface_id =
			cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
	}

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {
			.bound_dev_if = rec->ifindex,
			.net = rec->net ? rec->net : &init_net
		};
		union {
			struct sockaddr _sockaddr;
			struct sockaddr_in _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		/* dev_get_by_index() can return NULL; don't dereference it */
		if (!resolved_dev) {
			dev_put(idev);
			return -ENODEV;
		}
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index = gid_index;
		ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
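
/*
 * Illustrative sketch, not part of the original driver: a typical consumer
 * of ib_init_ah_from_path() (the CM, for instance) turns a resolved path
 * record into an address handle. demo_ah_from_path() and its pd argument
 * are hypothetical names introduced here for illustration only.
 */
static __maybe_unused struct ib_ah *demo_ah_from_path(struct ib_pd *pd,
						      struct ib_device *device,
						      u8 port_num,
						      struct ib_sa_path_rec *rec)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_path(device, port_num, rec, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	/* ah_attr now carries dlid/sl/GRH etc. taken from the path record */
	return ib_create_ah(pd, &ah_attr);
}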

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		eth_zero_addr(rec.dmac);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release = ib_sa_path_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
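
/*
 * Illustrative sketch, not part of the original driver: how a kernel
 * client might issue (and later cancel) a path record query. All demo_*
 * names are hypothetical; the client is assumed to have been set up with
 * ib_sa_register_client(), and the comp_mask must cover every field the
 * caller filled in within *rec.
 */
static __maybe_unused void demo_path_handler(int status,
					     struct ib_sa_path_rec *resp,
					     void *context)
{
	/* resp is only valid when status == 0 */
	if (!status)
		pr_info("path resolved, dlid 0x%x\n", be16_to_cpu(resp->dlid));
	else
		pr_info("path query failed: %d\n", status);
}

static __maybe_unused int demo_resolve_path(struct ib_sa_client *client,
					    struct ib_device *device,
					    u8 port_num,
					    struct ib_sa_path_rec *rec,
					    struct ib_sa_query **query)
{
	int id;

	id = ib_sa_path_rec_get(client, device, port_num, rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_NUMB_PATH,
				3000, GFP_KERNEL,
				demo_path_handler, NULL, query);
	if (id < 0)
		return id;

	/*
	 * A caller that loses interest can later call
	 * ib_sa_cancel_query(id, *query); the handler then fires
	 * with status -EINTR.
	 */
	return id;
}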

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release = ib_sa_service_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
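
/*
 * Illustrative sketch, not part of the original driver: registering a
 * service record is a SET, unregistering a DELETE, and a lookup a GET.
 * demo_register_service() is a hypothetical name; the comp_mask is left
 * to the caller, since it must match exactly the fields set in *rec.
 */
static __maybe_unused int demo_register_service(struct ib_sa_client *client,
						struct ib_device *device,
						u8 port_num,
						struct ib_sa_service_rec *rec,
						ib_sa_comp_mask comp_mask,
						void (*cb)(int, struct ib_sa_service_rec *, void *),
						void *context,
						struct ib_sa_query **query)
{
	/* Use IB_SA_METHOD_DELETE instead of SET to unregister. */
	return ib_sa_service_rec_query(client, device, port_num,
				       IB_MGMT_METHOD_SET, rec, comp_mask,
				       3000, GFP_KERNEL, cb, context, query);
}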

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err1:
	kfree(query);
	return ret;
}
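
/*
 * Illustrative sketch, not part of the original driver: multicast.c in
 * this module is the real consumer of ib_sa_mcmember_rec_query(); a
 * multicast join is a SET and a leave a DELETE of the member record.
 * demo_mcast_join() is a hypothetical name and comp_mask is left to the
 * caller.
 */
static __maybe_unused int demo_mcast_join(struct ib_sa_client *client,
					  struct ib_device *device,
					  u8 port_num,
					  struct ib_sa_mcmember_rec *rec,
					  ib_sa_comp_mask comp_mask,
					  void (*cb)(int, struct ib_sa_mcmember_rec *, void *),
					  void *context,
					  struct ib_sa_query **query)
{
	return ib_sa_mcmember_rec_query(client, device, port_num,
					IB_MGMT_METHOD_SET, rec, comp_mask,
					3000, GFP_KERNEL, cb, context, query);
}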

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref,
					 free_sm_ah);
		}
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
			    ib_sa_cb_table)) {
		pr_err("Failed to add netlink callback\n");
		ret = -EINVAL;
		goto err4;
	}
	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err4:
	destroy_workqueue(ib_nl_wq);
err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_LS);
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);