/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/ipv6_stubs.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>

#include "core_priv.h"

struct addr_req {
	struct list_head list;
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;
	void *context;
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	unsigned long timeout;
	struct delayed_work work;
	bool resolve_by_gid_attr;	/* Consider gid attr in resolve phase */
	int status;
	u32 seq;
};

static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
static struct workqueue_struct *addr_wq;

static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
			      .len = sizeof(struct rdma_nla_ls_gid)},
};

static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return false;

	ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
				   nlmsg_len(nlh), ib_nl_addr_policy, NULL);
	if (ret)
		return false;

	return true;
}
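/*
 * A reply from the userspace resolver is matched back to its originating
 * request purely by nlmsg_seq; if no queued request carries that sequence
 * number, the reply is logged and dropped.
 */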
static void ib_nl_process_good_ip_resp(const struct nlmsghdr *nlh)
{
	const struct nlattr *head, *curr;
	union ib_gid gid;
	struct addr_req *req;
	int len, rem;
	int found = 0;

	head = (const struct nlattr *)nlmsg_data(nlh);
	len = nlmsg_len(nlh);

	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type == LS_NLA_TYPE_DGID)
			memcpy(&gid, nla_data(curr), nla_len(curr));
	}

	spin_lock_bh(&lock);
	list_for_each_entry(req, &req_list, list) {
		if (nlh->nlmsg_seq != req->seq)
			continue;
		/* We set the DGID part; the rest was set earlier */
		rdma_addr_set_dgid(req->addr, &gid);
		req->status = 0;
		found = 1;
		break;
	}
	spin_unlock_bh(&lock);

	if (!found)
		pr_info("Couldn't find request waiting for DGID: %pI6\n",
			&gid);
}

int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	if (ib_nl_is_good_ip_resp(nlh))
		ib_nl_process_good_ip_resp(nlh);

	return 0;
}

static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
			     const void *daddr,
			     u32 seq, u16 family)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct rdma_ls_ip_resolve_header *header;
	void *data;
	size_t size;
	int attrtype;
	int len;

	if (family == AF_INET) {
		size = sizeof(struct in_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
	} else {
		size = sizeof(struct in6_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
	}

	/* Room for the family header plus the address attribute */
	len = nla_total_size(size);
	len += NLMSG_ALIGN(sizeof(*header));

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -ENODATA;
	}

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	header->ifindex = dev_addr->bound_dev_if;
	nla_put(skb, attrtype, size, daddr);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);
	rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);

	/* Make the request retry, so when we get the response from userspace
	 * we will have something.
	 */
	return -ENODATA;
}
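/*
 * The size helpers below return the expected sockaddr length for the
 * address family, or 0 for an unsupported family. Callers typically use
 * the 0 return to reject malformed input; an illustrative sketch (with
 * 'cmd' standing in for a hypothetical command structure copied from
 * userspace):
 *
 *	if (!rdma_addr_size_kss(&cmd.addr))
 *		return -EINVAL;
 */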
int rdma_addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	case AF_IB:
		return sizeof(struct sockaddr_ib);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(rdma_addr_size);

int rdma_addr_size_in6(struct sockaddr_in6 *addr)
{
	int ret = rdma_addr_size((struct sockaddr *)addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_in6);

int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
{
	int ret = rdma_addr_size((struct sockaddr *)addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_kss);

/**
 * rdma_copy_src_l2_addr - Copy netdevice source addresses
 * @dev_addr:	Destination address pointer where to copy the addresses
 * @dev:	Netdevice whose source addresses to copy
 *
 * rdma_copy_src_l2_addr() copies source addresses from the specified
 * netdevice. This includes unicast address, broadcast address, device type
 * and interface index.
 */
void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			   const struct net_device *dev)
{
	dev_addr->dev_type = dev->type;
	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
	dev_addr->bound_dev_if = dev->ifindex;
}
EXPORT_SYMBOL(rdma_copy_src_l2_addr);

static struct net_device *
rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
	struct net_device *dev = NULL;
	int ret = -EADDRNOTAVAIL;

	switch (src_in->sa_family) {
	case AF_INET:
		dev = __ip_dev_find(net,
				    ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
				    false);
		if (dev)
			ret = 0;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		for_each_netdev_rcu(net, dev) {
			if (ipv6_chk_addr(net,
					  &((const struct sockaddr_in6 *)src_in)->sin6_addr,
					  dev, 1)) {
				ret = 0;
				break;
			}
		}
		break;
#endif
	}
	return ret ? ERR_PTR(ret) : dev;
}

int rdma_translate_ip(const struct sockaddr *addr,
		      struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		rdma_copy_src_l2_addr(dev_addr, dev);
		dev_put(dev);
		return 0;
	}

	rcu_read_lock();
	dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
	if (!IS_ERR(dev))
		rdma_copy_src_l2_addr(dev_addr, dev);
	rcu_read_unlock();
	return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);
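/*
 * Illustrative usage sketch (not part of this file), assuming 'netns' is a
 * caller-held namespace and 'my_local_ip' a hypothetical __be32 address
 * configured on some netdevice in that namespace:
 *
 *	struct rdma_dev_addr dev_addr = { .net = netns };
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = my_local_ip,
 *	};
 *	int err = rdma_translate_ip((struct sockaddr *)&sin, &dev_addr);
 *
 * On success, dev_addr carries the netdevice's unicast and broadcast
 * addresses, device type and ifindex.
 */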
static void set_timeout(struct addr_req *req, unsigned long time)
{
	unsigned long delay;

	delay = time - jiffies;
	if ((long)delay < 0)
		delay = 0;

	mod_delayed_work(addr_wq, &req->work, delay);
}

static void queue_req(struct addr_req *req)
{
	spin_lock_bh(&lock);
	list_add_tail(&req->list, &req_list);
	set_timeout(req, req->timeout);
	spin_unlock_bh(&lock);
}

static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
			  const void *daddr, u32 seq, u16 family)
{
	if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
		return -EADDRNOTAVAIL;

	return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}

static int dst_fetch_ha(const struct dst_entry *dst,
			struct rdma_dev_addr *dev_addr,
			const void *daddr)
{
	struct neighbour *n;
	int ret = 0;

	n = dst_neigh_lookup(dst, daddr);
	if (!n)
		return -ENODATA;

	if (!(n->nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		ret = -ENODATA;
	} else {
		neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
	}

	neigh_release(n);

	return ret;
}

static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
{
	struct rtable *rt;
	struct rt6_info *rt6;

	if (family == AF_INET) {
		rt = container_of(dst, struct rtable, dst);
		return rt->rt_uses_gateway;
	}

	rt6 = container_of(dst, struct rt6_info, dst);
	return rt6->rt6i_flags & RTF_GATEWAY;
}

static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
		    const struct sockaddr *dst_in, u32 seq)
{
	const struct sockaddr_in *dst_in4 =
		(const struct sockaddr_in *)dst_in;
	const struct sockaddr_in6 *dst_in6 =
		(const struct sockaddr_in6 *)dst_in;
	const void *daddr = (dst_in->sa_family == AF_INET) ?
		(const void *)&dst_in4->sin_addr.s_addr :
		(const void *)&dst_in6->sin6_addr;
	sa_family_t family = dst_in->sa_family;

	might_sleep();

	/* If we have a gateway in IB mode then it must be an IB network */
	if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
		return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
	else
		return dst_fetch_ha(dst, dev_addr, daddr);
}
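/*
 * The two resolvers below perform the L3 routing lookup only: they return
 * the route, fill in the source address chosen by the route when the
 * caller left it unspecified, and record the hop limit. L2 (neighbour)
 * resolution happens separately in addr_resolve_neigh().
 */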
static int addr4_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct rtable **prt)
{
	struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
	const struct sockaddr_in *dst_in =
		(const struct sockaddr_in *)dst_sock;

	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct rtable *rt;
	struct flowi4 fl4;
	int ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(addr->net, &fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;

	src_in->sin_addr.s_addr = fl4.saddr;

	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);

	*prt = rt;
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
	const struct sockaddr_in6 *dst_in =
		(const struct sockaddr_in6 *)dst_sock;
	struct flowi6 fl6;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;
	fl6.flowi6_oif = addr->bound_dev_if;

	dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	if (ipv6_addr_any(&src_in->sin6_addr))
		src_in->sin6_addr = fl6.saddr;

	addr->hoplimit = ip6_dst_hoplimit(dst);

	*pdst = dst;
	return 0;
}
#else
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	return -EADDRNOTAVAIL;
}
#endif

static int addr_resolve_neigh(const struct dst_entry *dst,
			      const struct sockaddr *dst_in,
			      struct rdma_dev_addr *addr,
			      unsigned int ndev_flags,
			      u32 seq)
{
	int ret = 0;

	if (ndev_flags & IFF_LOOPBACK) {
		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
	} else {
		if (!(ndev_flags & IFF_NOARP)) {
			/* If the device doesn't do ARP internally */
			ret = fetch_ha(dst, addr, dst_in, seq);
		}
	}
	return ret;
}

static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			    const struct sockaddr *dst_in,
			    const struct dst_entry *dst,
			    const struct net_device *ndev)
{
	int ret = 0;

	if (dst->dev->flags & IFF_LOOPBACK)
		ret = rdma_translate_ip(dst_in, dev_addr);
	else
		rdma_copy_src_l2_addr(dev_addr, dst->dev);

	/*
	 * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
	 * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
	 * the network type accordingly.
	 */
	if (has_gateway(dst, dst_in->sa_family) &&
	    ndev->type != ARPHRD_INFINIBAND)
		dev_addr->network = dst_in->sa_family == AF_INET ?
					RDMA_NETWORK_IPV4 :
					RDMA_NETWORK_IPV6;
	else
		dev_addr->network = RDMA_NETWORK_IB;

	return ret;
}

static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
				 unsigned int *ndev_flags,
				 const struct sockaddr *dst_in,
				 const struct dst_entry *dst)
{
	struct net_device *ndev = READ_ONCE(dst->dev);

	*ndev_flags = ndev->flags;
	/* A physical device must be the RDMA device to use */
	if (ndev->flags & IFF_LOOPBACK) {
		/*
		 * RDMA (IB/RoCE, iWARP) doesn't run on the lo interface or
		 * on a loopback IP address. So if the route resolves to the
		 * loopback interface, translate that to a real ndev based
		 * on a non-loopback IP address.
		 */
		ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
		if (IS_ERR(ndev))
			return -ENODEV;
	}

	return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
}

static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
{
	struct net_device *ndev;

	ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	/*
	 * Since we are holding the RCU read lock, reading net and ifindex
	 * is safe without any additional reference, because
	 * dev_change_net_namespace() in net/core/dev.c does an RCU sync
	 * after it changes the state to IFF_DOWN and before updating the
	 * netdev fields {net, ifindex}.
	 */
	addr->net = dev_net(ndev);
	addr->bound_dev_if = ndev->ifindex;
	return 0;
}

static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
{
	addr->net = &init_net;
	addr->bound_dev_if = 0;
}
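/*
 * Core of the resolution path: look up the route to the destination,
 * derive the source L2 addresses (optionally pinning net/ifindex from the
 * GID attribute first) and then, if requested, resolve the destination L2
 * address via the neighbour table or the LS netlink service.
 */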
static int addr_resolve(struct sockaddr *src_in,
			const struct sockaddr *dst_in,
			struct rdma_dev_addr *addr,
			bool resolve_neigh,
			bool resolve_by_gid_attr,
			u32 seq)
{
	struct dst_entry *dst = NULL;
	unsigned int ndev_flags = 0;
	struct rtable *rt = NULL;
	int ret;

	if (!addr->net) {
		pr_warn_ratelimited("%s: missing namespace\n", __func__);
		return -EINVAL;
	}

	rcu_read_lock();
	if (resolve_by_gid_attr) {
		if (!addr->sgid_attr) {
			rcu_read_unlock();
			pr_warn_ratelimited("%s: missing gid_attr\n",
					    __func__);
			return -EINVAL;
		}
		/*
		 * If the request is for a specific gid attribute of the
		 * rdma_dev_addr, derive net from the netdevice of the
		 * GID attribute.
		 */
		ret = set_addr_netns_by_gid_rcu(addr);
		if (ret) {
			rcu_read_unlock();
			return ret;
		}
	}
	if (src_in->sa_family == AF_INET) {
		ret = addr4_resolve(src_in, dst_in, addr, &rt);
		dst = &rt->dst;
	} else {
		ret = addr6_resolve(src_in, dst_in, addr, &dst);
	}
	if (ret) {
		rcu_read_unlock();
		goto done;
	}
	ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
	rcu_read_unlock();

	/*
	 * Resolve neighbor destination address if requested and
	 * only if src addr translation didn't fail.
	 */
	if (!ret && resolve_neigh)
		ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);

	if (src_in->sa_family == AF_INET)
		ip_rt_put(rt);
	else
		dst_release(dst);
done:
	/*
	 * Clear the addr net to go back to its original state, only if it
	 * was derived from the GID attribute in this context.
	 */
	if (resolve_by_gid_attr)
		rdma_addr_set_net_defaults(addr);
	return ret;
}

static void process_one_req(struct work_struct *_work)
{
	struct addr_req *req;
	struct sockaddr *src_in, *dst_in;

	req = container_of(_work, struct addr_req, work.work);

	if (req->status == -ENODATA) {
		src_in = (struct sockaddr *)&req->src_addr;
		dst_in = (struct sockaddr *)&req->dst_addr;
		req->status = addr_resolve(src_in, dst_in, req->addr,
					   true, req->resolve_by_gid_attr,
					   req->seq);
		if (req->status && time_after_eq(jiffies, req->timeout)) {
			req->status = -ETIMEDOUT;
		} else if (req->status == -ENODATA) {
			/* requeue the work for retrying again */
			spin_lock_bh(&lock);
			if (!list_empty(&req->list))
				set_timeout(req, req->timeout);
			spin_unlock_bh(&lock);
			return;
		}
	}

	req->callback(req->status, (struct sockaddr *)&req->src_addr,
		      req->addr, req->context);
	req->callback = NULL;

	spin_lock_bh(&lock);
	/*
	 * Although the work will normally have been canceled by the
	 * workqueue, it can still be requeued as long as it is on the
	 * req_list.
	 */
	cancel_delayed_work(&req->work);
	if (!list_empty(&req->list)) {
		list_del_init(&req->list);
		kfree(req);
	}
	spin_unlock_bh(&lock);
}
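/*
 * Illustrative caller sketch (not part of this file): rdma_resolve_ip()
 * completes asynchronously, so a caller needing a synchronous answer
 * typically pairs it with a completion, exactly as
 * rdma_addr_find_l2_eth_by_grh() does further down:
 *
 *	struct resolve_cb_context ctx;
 *
 *	init_completion(&ctx.comp);
 *	ret = rdma_resolve_ip(src, dst, &dev_addr, 1000, resolve_cb,
 *			      true, &ctx);
 *	if (!ret) {
 *		wait_for_completion(&ctx.comp);
 *		ret = ctx.status;
 *	}
 */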
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, unsigned long timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    bool resolve_by_gid_attr, void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *)&req->src_addr;
	dst_in = (struct sockaddr *)&req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}

		memcpy(src_in, src_addr, rdma_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	req->resolve_by_gid_attr = resolve_by_gid_attr;
	INIT_DELAYED_WORK(&req->work, process_one_req);
	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

	req->status = addr_resolve(src_in, dst_in, addr, true,
				   req->resolve_by_gid_attr, req->seq);
	switch (req->status) {
	case 0:
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);

int roce_resolve_route_from_path(struct sa_path_rec *rec,
				 const struct ib_gid_attr *attr)
{
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid, dgid;
	struct rdma_dev_addr dev_addr = {};
	int ret;

	might_sleep();

	if (rec->roce.route_resolved)
		return 0;

	rdma_gid2ip((struct sockaddr *)&sgid, &rec->sgid);
	rdma_gid2ip((struct sockaddr *)&dgid, &rec->dgid);

	if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
		return -EINVAL;

	if (!attr || !attr->ndev)
		return -EINVAL;

	dev_addr.net = &init_net;
	dev_addr.sgid_attr = attr;

	ret = addr_resolve((struct sockaddr *)&sgid, (struct sockaddr *)&dgid,
			   &dev_addr, false, true, 0);
	if (ret)
		return ret;

	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
	     dev_addr.network == RDMA_NETWORK_IPV6) &&
	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
		return -EINVAL;

	rec->roce.route_resolved = true;
	return 0;
}

/**
 * rdma_addr_cancel - Cancel resolve ip request
 * @addr: Pointer to address structure given previously
 * during rdma_resolve_ip().
 *
 * rdma_addr_cancel() is a synchronous function which cancels any pending
 * request if there is one.
 */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;
	struct addr_req *found = NULL;

	spin_lock_bh(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			/*
			 * Removing from the list means we take ownership of
			 * the req
			 */
			list_del_init(&req->list);
			found = req;
			break;
		}
	}
	spin_unlock_bh(&lock);

	if (!found)
		return;

	/*
	 * sync canceling the work after removing it from the req_list
	 * guarantees no work is running and none will be started.
	 */
	cancel_delayed_work_sync(&found->work);
	kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
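/*
 * Completion-based glue that turns the asynchronous rdma_resolve_ip()
 * interface into the synchronous lookup below.
 */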
struct resolve_cb_context {
	struct completion comp;
	int status;
};

static void resolve_cb(int status, struct sockaddr *src_addr,
		       struct rdma_dev_addr *addr, void *context)
{
	((struct resolve_cb_context *)context)->status = status;
	complete(&((struct resolve_cb_context *)context)->comp);
}

int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, const struct ib_gid_attr *sgid_attr,
				 int *hoplimit)
{
	struct rdma_dev_addr dev_addr;
	struct resolve_cb_context ctx;
	union {
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int ret;

	rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
	rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.net = &init_net;
	dev_addr.sgid_attr = sgid_attr;

	init_completion(&ctx.comp);
	ret = rdma_resolve_ip((struct sockaddr *)&sgid_addr,
			      (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
			      resolve_cb, true, &ctx);
	if (ret)
		return ret;

	wait_for_completion(&ctx.comp);

	ret = ctx.status;
	if (ret)
		return ret;

	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
	*hoplimit = dev_addr.hoplimit;
	return 0;
}

static int netevent_callback(struct notifier_block *self, unsigned long event,
			     void *ctx)
{
	struct addr_req *req;

	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;

		if (neigh->nud_state & NUD_VALID) {
			spin_lock_bh(&lock);
			list_for_each_entry(req, &req_list, list)
				set_timeout(req, jiffies);
			spin_unlock_bh(&lock);
		}
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = netevent_callback
};

int addr_init(void)
{
	addr_wq = alloc_ordered_workqueue("ib_addr", 0);
	if (!addr_wq)
		return -ENOMEM;

	register_netevent_notifier(&nb);

	return 0;
}

void addr_cleanup(void)
{
	unregister_netevent_notifier(&nb);
	destroy_workqueue(addr_wq);
	WARN_ON(!list_empty(&req_list));
}