/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>

#include "core_priv.h"

struct addr_req {
	struct list_head list;
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;
	void *context;
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	unsigned long timeout;
	struct delayed_work work;
	bool resolve_by_gid_attr;	/* Consider gid attr in resolve phase */
	int status;
	u32 seq;
};

static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
static struct workqueue_struct *addr_wq;

static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
			      .len = sizeof(struct rdma_nla_ls_gid)},
};

static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return false;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_addr_policy, NULL);
	if (ret)
		return false;

	return true;
}

static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
{
	const struct nlattr *head, *curr;
	union ib_gid gid;
	struct addr_req *req;
	int len, rem;
	int found = 0;

	head = (const struct nlattr *)nlmsg_data(nlh);
	len = nlmsg_len(nlh);
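
	/* Extract the resolved DGID from the netlink attribute payload. */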
	nla_for_each_attr(curr, head, len, rem) {
		if (curr->nla_type == LS_NLA_TYPE_DGID)
			memcpy(&gid, nla_data(curr), nla_len(curr));
	}

	spin_lock_bh(&lock);
	list_for_each_entry(req, &req_list, list) {
		if (nlh->nlmsg_seq != req->seq)
			continue;
		/* We set the DGID part, the rest was set earlier */
		rdma_addr_set_dgid(req->addr, &gid);
		req->status = 0;
		found = 1;
		break;
	}
	spin_unlock_bh(&lock);

	if (!found)
		pr_info("Couldn't find request waiting for DGID: %pI6\n",
			&gid);
}

int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	if (ib_nl_is_good_ip_resp(nlh))
		ib_nl_process_good_ip_rsep(nlh);

	return skb->len;
}

static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
			     const void *daddr,
			     u32 seq, u16 family)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct rdma_ls_ip_resolve_header *header;
	void *data;
	size_t size;
	int attrtype;
	int len;

	if (family == AF_INET) {
		size = sizeof(struct in_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
	} else {
		size = sizeof(struct in6_addr);
		attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
	}

	len = nla_total_size(sizeof(size));
	len += NLMSG_ALIGN(sizeof(*header));

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -ENODATA;
	}

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	header->ifindex = dev_addr->bound_dev_if;
	nla_put(skb, attrtype, size, daddr);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);
	rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);

	/*
	 * Return -ENODATA so that the request keeps retrying; by the time
	 * it retries, the response from userspace may have arrived.
	 */
	return -ENODATA;
}

int rdma_addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	case AF_IB:
		return sizeof(struct sockaddr_ib);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(rdma_addr_size);

int rdma_addr_size_in6(struct sockaddr_in6 *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_in6);

int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
{
	int ret = rdma_addr_size((struct sockaddr *) addr);

	return ret <= sizeof(*addr) ? ret : 0;
}
EXPORT_SYMBOL(rdma_addr_size_kss);

/**
 * rdma_copy_src_l2_addr - Copy netdevice source addresses
 * @dev_addr:	Destination address structure into which to copy the addresses
 * @dev:	Netdevice whose source addresses to copy
 *
 * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
 * This includes the unicast address, broadcast address, device type and
 * interface index.
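 *
 * The caller must hold a reference on @dev or the RCU read lock.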
 */
void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			   const struct net_device *dev)
{
	dev_addr->dev_type = dev->type;
	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
	dev_addr->bound_dev_if = dev->ifindex;
}
EXPORT_SYMBOL(rdma_copy_src_l2_addr);

static struct net_device *
rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
	struct net_device *dev = NULL;
	int ret = -EADDRNOTAVAIL;

	switch (src_in->sa_family) {
	case AF_INET:
		dev = __ip_dev_find(net,
				    ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
				    false);
		if (dev)
			ret = 0;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		for_each_netdev_rcu(net, dev) {
			if (ipv6_chk_addr(net,
					  &((const struct sockaddr_in6 *)src_in)->sin6_addr,
					  dev, 1)) {
				ret = 0;
				break;
			}
		}
		break;
#endif
	}
	return ret ? ERR_PTR(ret) : dev;
}

int rdma_translate_ip(const struct sockaddr *addr,
		      struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		rdma_copy_src_l2_addr(dev_addr, dev);
		dev_put(dev);
		return 0;
	}

	rcu_read_lock();
	dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
	if (!IS_ERR(dev))
		rdma_copy_src_l2_addr(dev_addr, dev);
	rcu_read_unlock();
	return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);

static void set_timeout(struct addr_req *req, unsigned long time)
{
	unsigned long delay;

	delay = time - jiffies;
	if ((long)delay < 0)
		delay = 0;

	mod_delayed_work(addr_wq, &req->work, delay);
}

static void queue_req(struct addr_req *req)
{
	spin_lock_bh(&lock);
	list_add_tail(&req->list, &req_list);
	set_timeout(req, req->timeout);
	spin_unlock_bh(&lock);
}

static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
			  const void *daddr, u32 seq, u16 family)
{
	if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
		return -EADDRNOTAVAIL;

	return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}

static int dst_fetch_ha(const struct dst_entry *dst,
			struct rdma_dev_addr *dev_addr,
			const void *daddr)
{
	struct neighbour *n;
	int ret = 0;

	n = dst_neigh_lookup(dst, daddr);
	if (!n)
		return -ENODATA;

	if (!(n->nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		ret = -ENODATA;
	} else {
		memcpy(dev_addr->dst_dev_addr, n->ha, MAX_ADDR_LEN);
	}

	neigh_release(n);

	return ret;
}

static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
{
	struct rtable *rt;
	struct rt6_info *rt6;

	if (family == AF_INET) {
		rt = container_of(dst, struct rtable, dst);
		return rt->rt_uses_gateway;
	}

	rt6 = container_of(dst, struct rt6_info, dst);
	return rt6->rt6i_flags & RTF_GATEWAY;
}
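
/*
 * Fetch the destination hardware address: from the kernel neighbour
 * table in the common case, or via a netlink request to userspace when
 * the destination sits behind a gateway on an IB network.
 */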
static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
		    const struct sockaddr *dst_in, u32 seq)
{
	const struct sockaddr_in *dst_in4 =
		(const struct sockaddr_in *)dst_in;
	const struct sockaddr_in6 *dst_in6 =
		(const struct sockaddr_in6 *)dst_in;
	const void *daddr = (dst_in->sa_family == AF_INET) ?
		(const void *)&dst_in4->sin_addr.s_addr :
		(const void *)&dst_in6->sin6_addr;
	sa_family_t family = dst_in->sa_family;

	/* If we have a gateway in IB mode then it must be an IB network */
	if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
		return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
	else
		return dst_fetch_ha(dst, dev_addr, daddr);
}

static int addr4_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct rtable **prt)
{
	struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
	const struct sockaddr_in *dst_in =
		(const struct sockaddr_in *)dst_sock;

	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct rtable *rt;
	struct flowi4 fl4;
	int ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(addr->net, &fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;

	src_in->sin_addr.s_addr = fl4.saddr;

	addr->hoplimit = ip4_dst_hoplimit(&rt->dst);

	*prt = rt;
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
	const struct sockaddr_in6 *dst_in =
		(const struct sockaddr_in6 *)dst_sock;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int ret;

	memset(&fl6, 0, sizeof fl6);
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;
	fl6.flowi6_oif = addr->bound_dev_if;

	ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
	if (ret < 0)
		return ret;

	if (ipv6_addr_any(&src_in->sin6_addr))
		src_in->sin6_addr = fl6.saddr;

	addr->hoplimit = ip6_dst_hoplimit(dst);

	*pdst = dst;
	return 0;
}
#else
static int addr6_resolve(struct sockaddr *src_sock,
			 const struct sockaddr *dst_sock,
			 struct rdma_dev_addr *addr,
			 struct dst_entry **pdst)
{
	return -EADDRNOTAVAIL;
}
#endif

static int addr_resolve_neigh(const struct dst_entry *dst,
			      const struct sockaddr *dst_in,
			      struct rdma_dev_addr *addr,
			      unsigned int ndev_flags,
			      u32 seq)
{
	int ret = 0;

	if (ndev_flags & IFF_LOOPBACK) {
		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
	} else {
		if (!(ndev_flags & IFF_NOARP)) {
			/* If the device doesn't do ARP internally */
			ret = fetch_ha(dst, addr, dst_in, seq);
		}
	}
	return ret;
}

static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
			    const struct sockaddr *dst_in,
			    const struct dst_entry *dst,
			    const struct net_device *ndev)
{
	int ret = 0;

	if (dst->dev->flags & IFF_LOOPBACK)
		ret = rdma_translate_ip(dst_in, dev_addr);
	else
		rdma_copy_src_l2_addr(dev_addr, dst->dev);

	/*
	 * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
	 * we're definitely dealing with RoCE v2 (RoCE v1 isn't routable), so
	 * set the network type accordingly.
	 */
	if (has_gateway(dst, dst_in->sa_family) &&
	    ndev->type != ARPHRD_INFINIBAND)
		dev_addr->network = dst_in->sa_family == AF_INET ?
				    RDMA_NETWORK_IPV4 :
				    RDMA_NETWORK_IPV6;
	else
		dev_addr->network = RDMA_NETWORK_IB;

	return ret;
}
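
/*
 * Derive the source L2 address from the resolved route. A route that
 * terminates at the loopback device is first remapped to the real
 * netdevice that owns the destination IP address.
 */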
static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
				 unsigned int *ndev_flags,
				 const struct sockaddr *dst_in,
				 const struct dst_entry *dst)
{
	struct net_device *ndev = READ_ONCE(dst->dev);

	*ndev_flags = ndev->flags;
	/* A physical device must be the RDMA device to use */
	if (ndev->flags & IFF_LOOPBACK) {
		/*
		 * RDMA (IB/RoCE, iWarp) doesn't run on the lo interface or
		 * on a loopback IP address. So if the route resolves to the
		 * loopback interface, translate that to a real ndev based on
		 * a non-loopback IP address.
		 */
		ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
		if (IS_ERR(ndev))
			return -ENODEV;
	}

	return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
}

static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
{
	struct net_device *ndev;

	ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);

	/*
	 * Since we are holding the rcu, reading net and ifindex
	 * is safe without any additional reference, because
	 * change_net_namespace() in net/core/dev.c does an rcu sync
	 * after it changes the state to IFF_DOWN and before
	 * updating the netdev fields {net, ifindex}.
	 */
	addr->net = dev_net(ndev);
	addr->bound_dev_if = ndev->ifindex;
	return 0;
}

static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
{
	addr->net = &init_net;
	addr->bound_dev_if = 0;
}

static int addr_resolve(struct sockaddr *src_in,
			const struct sockaddr *dst_in,
			struct rdma_dev_addr *addr,
			bool resolve_neigh,
			bool resolve_by_gid_attr,
			u32 seq)
{
	struct dst_entry *dst = NULL;
	unsigned int ndev_flags = 0;
	struct rtable *rt = NULL;
	int ret;

	if (!addr->net) {
		pr_warn_ratelimited("%s: missing namespace\n", __func__);
		return -EINVAL;
	}

	rcu_read_lock();
	if (resolve_by_gid_attr) {
		if (!addr->sgid_attr) {
			rcu_read_unlock();
			pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
			return -EINVAL;
		}
		/*
		 * If the request is for a specific gid attribute of the
		 * rdma_dev_addr, derive net from the netdevice of the
		 * GID attribute.
		 */
		ret = set_addr_netns_by_gid_rcu(addr);
		if (ret) {
			rcu_read_unlock();
			return ret;
		}
	}
	if (src_in->sa_family == AF_INET) {
		ret = addr4_resolve(src_in, dst_in, addr, &rt);
		dst = &rt->dst;
	} else {
		ret = addr6_resolve(src_in, dst_in, addr, &dst);
	}
	if (ret) {
		rcu_read_unlock();
		goto done;
	}
	ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
	rcu_read_unlock();

	/*
	 * Resolve neighbor destination address if requested and
	 * only if src addr translation didn't fail.
	 */
	if (!ret && resolve_neigh)
		ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);

	if (src_in->sa_family == AF_INET)
		ip_rt_put(rt);
	else
		dst_release(dst);
done:
	/*
	 * Clear the addr net to go back to its original state, only if it was
	 * derived from GID attribute in this context.
	 */
	if (resolve_by_gid_attr)
		rdma_addr_set_net_defaults(addr);
	return ret;
}
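
/*
 * Work handler for a queued request: keep retrying resolution while it
 * returns -ENODATA, fail it with -ETIMEDOUT once its deadline passes,
 * and deliver the user callback when a final status is known.
 */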
static void process_one_req(struct work_struct *_work)
{
	struct addr_req *req;
	struct sockaddr *src_in, *dst_in;

	req = container_of(_work, struct addr_req, work.work);

	if (req->status == -ENODATA) {
		src_in = (struct sockaddr *)&req->src_addr;
		dst_in = (struct sockaddr *)&req->dst_addr;
		req->status = addr_resolve(src_in, dst_in, req->addr,
					   true, req->resolve_by_gid_attr,
					   req->seq);
		if (req->status && time_after_eq(jiffies, req->timeout)) {
			req->status = -ETIMEDOUT;
		} else if (req->status == -ENODATA) {
			/* requeue the work for retrying */
			spin_lock_bh(&lock);
			if (!list_empty(&req->list))
				set_timeout(req, req->timeout);
			spin_unlock_bh(&lock);
			return;
		}
	}

	req->callback(req->status, (struct sockaddr *)&req->src_addr,
		      req->addr, req->context);
	req->callback = NULL;

	spin_lock_bh(&lock);
	if (!list_empty(&req->list)) {
		/*
		 * Although the work will normally have been canceled by the
		 * workqueue, it can still be requeued as long as it is on the
		 * req_list.
		 */
		cancel_delayed_work(&req->work);
		list_del_init(&req->list);
		kfree(req);
	}
	spin_unlock_bh(&lock);
}

int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, unsigned long timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    bool resolve_by_gid_attr, void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *) &req->src_addr;
	dst_in = (struct sockaddr *) &req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}

		memcpy(src_in, src_addr, rdma_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	req->resolve_by_gid_attr = resolve_by_gid_attr;
	INIT_DELAYED_WORK(&req->work, process_one_req);
	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

	req->status = addr_resolve(src_in, dst_in, addr, true,
				   req->resolve_by_gid_attr, req->seq);
	switch (req->status) {
	case 0:
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
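
/*
 * Resolve the IP route for a RoCE path record. A resolved route that
 * crosses a gateway implies RoCE v2, so it is rejected for any other
 * record type.
 */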
int roce_resolve_route_from_path(struct sa_path_rec *rec,
				 const struct ib_gid_attr *attr)
{
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid, dgid;
	struct rdma_dev_addr dev_addr = {};
	int ret;

	if (rec->roce.route_resolved)
		return 0;

	rdma_gid2ip(&sgid._sockaddr, &rec->sgid);
	rdma_gid2ip(&dgid._sockaddr, &rec->dgid);

	if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
		return -EINVAL;

	if (!attr || !attr->ndev)
		return -EINVAL;

	dev_addr.net = &init_net;
	dev_addr.sgid_attr = attr;

	ret = addr_resolve(&sgid._sockaddr, &dgid._sockaddr,
			   &dev_addr, false, true, 0);
	if (ret)
		return ret;

	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
	     dev_addr.network == RDMA_NETWORK_IPV6) &&
	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
		return -EINVAL;

	rec->roce.route_resolved = true;
	return 0;
}

/**
 * rdma_addr_cancel - Cancel resolve ip request
 * @addr:	Pointer to address structure given previously
 *		during rdma_resolve_ip().
 *
 * rdma_addr_cancel() is a synchronous function that cancels any pending
 * request, if one exists.
 */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;
	struct addr_req *found = NULL;

	spin_lock_bh(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			/*
			 * Removing from the list means we take ownership of
			 * the req
			 */
			list_del_init(&req->list);
			found = req;
			break;
		}
	}
	spin_unlock_bh(&lock);

	if (!found)
		return;

	/*
	 * Synchronously canceling the work after removing it from the
	 * req_list guarantees no work is running and none will be started.
	 */
	cancel_delayed_work_sync(&found->work);
	kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);

struct resolve_cb_context {
	struct completion comp;
	int status;
};

static void resolve_cb(int status, struct sockaddr *src_addr,
		       struct rdma_dev_addr *addr, void *context)
{
	((struct resolve_cb_context *)context)->status = status;
	complete(&((struct resolve_cb_context *)context)->comp);
}

int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, const struct ib_gid_attr *sgid_attr,
				 int *hoplimit)
{
	struct rdma_dev_addr dev_addr;
	struct resolve_cb_context ctx;
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int ret;

	rdma_gid2ip(&sgid_addr._sockaddr, sgid);
	rdma_gid2ip(&dgid_addr._sockaddr, dgid);

	memset(&dev_addr, 0, sizeof(dev_addr));
	dev_addr.net = &init_net;
	dev_addr.sgid_attr = sgid_attr;

	init_completion(&ctx.comp);
	ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
			      &dev_addr, 1000, resolve_cb, true, &ctx);
	if (ret)
		return ret;

	wait_for_completion(&ctx.comp);

	ret = ctx.status;
	if (ret)
		return ret;

	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
	*hoplimit = dev_addr.hoplimit;
	return 0;
}

static int netevent_callback(struct notifier_block *self, unsigned long event,
			     void *ctx)
{
	struct addr_req *req;

	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;

		if (neigh->nud_state & NUD_VALID) {
			spin_lock_bh(&lock);
			list_for_each_entry(req, &req_list, list)
				set_timeout(req, jiffies);
			spin_unlock_bh(&lock);
		}
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = netevent_callback
};

int addr_init(void)
{
	addr_wq = alloc_ordered_workqueue("ib_addr", 0);
	if (!addr_wq)
		return -ENOMEM;

	register_netevent_notifier(&nb);

	return 0;
}

void addr_cleanup(void)
{
	unregister_netevent_notifier(&nb);
	destroy_workqueue(addr_wq);
	WARN_ON(!list_empty(&req_list));
}
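
/*
 * Usage sketch (hypothetical caller; src, dst and dev_addr are assumed
 * to be set up by the caller): resolve asynchronously and wait on a
 * completion, mirroring what rdma_addr_find_l2_eth_by_grh() above does.
 *
 *	struct resolve_cb_context ctx;
 *	int ret;
 *
 *	init_completion(&ctx.comp);
 *	ret = rdma_resolve_ip(src, dst, &dev_addr, 1000, resolve_cb,
 *			      false, &ctx);
 *	if (!ret) {
 *		wait_for_completion(&ctx.comp);
 *		ret = ctx.status;
 *	}
 */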