// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static struct rxe_recv_sockets recv_sockets;

int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}

int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}

static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { };

	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { };

	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
					       recv_sockets.sk6->sk, &fl6,
					       NULL);
	if (IS_ERR(ndst)) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		return NULL;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif

/* Find a route to the destination named in the AV. For RC QPs the dst
 * is cached on the QP's socket and revalidated with dst_check() before
 * it is reused.
 */
static struct dst_entry *rxe_find_route(struct net_device *ndev,
					struct rxe_qp *qp,
					struct rxe_av *av)
{
	struct dst_entry *dst = NULL;

	if (qp_type(qp) == IB_QPT_RC)
		dst = sk_dst_get(qp->sk->sk);

	if (!dst || !dst_check(dst, qp->dst_cookie)) {
		if (dst)
			dst_release(dst);

		if (av->network_type == RXE_NETWORK_TYPE_IPV4) {
			struct in_addr *saddr;
			struct in_addr *daddr;

			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
			dst = rxe_find_route4(ndev, saddr, daddr);
		} else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
			struct in6_addr *saddr6;
			struct in6_addr *daddr6;

			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
			dst = rxe_find_route6(ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
			if (dst)
				qp->dst_cookie =
					rt6_get_cookie((struct rt6_info *)dst);
#endif
		}

		if (dst && (qp_type(qp) == IB_QPT_RC)) {
			dst_hold(dst);
			sk_dst_set(qp->sk->sk, dst);
		}
	}
	return dst;
}

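/* rxe_udp_encap_recv() is installed below as the encap_rcv callback of
 * the tunnel sockets created by rxe_setup_udp_tunnel(), so the UDP stack
 * calls it for every datagram arriving on the RoCEv2 port. It maps the
 * receiving netdev (unwrapping a VLAN device if necessary) to its rxe
 * device, fills in the per-skb rxe_pkt_info and hands the packet to
 * rxe_rcv().
 */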
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct rxe_dev *rxe;
	struct net_device *ndev = skb->dev;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	/* takes a reference on rxe->ib_dev
	 * drop when skb is freed
	 */
	rxe = rxe_get_dev_from_net(ndev);
	if (!rxe && is_vlan_dev(ndev))
		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		ib_device_put(&rxe->ib_dev);
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	rxe_rcv(skb);

	return 0;
drop:
	kfree_skb(skb);

	return 0;
}

static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = { };
	struct udp_tunnel_sock_cfg tnl_cfg = { };

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0) {
		pr_err("failed to create udp socket. err = %d\n", err);
		return ERR_PTR(err);
	}

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

static void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}

static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}

static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst_clone(dst));
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst_clone(dst));

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}

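/* prepare4()/prepare6() build the outer headers of an outgoing RoCEv2
 * packet: route lookup, then a UDP header with destination port 4791
 * (ROCE_V2_UDP_DPORT), and finally the IPv4/IPv6 header. The UDP
 * checksum is left at zero; end-to-end integrity is provided by the
 * ICRC computed in rxe_prepare() instead.
 */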
static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct rxe_av *av = rxe_get_av(pkt);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	dst_release(dst);
	return 0;
}

static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	struct rxe_av *av = rxe_get_av(pkt);
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class,
			 av->grh.hop_limit);

	dst_release(dst);
	return 0;
}

int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
{
	int err = 0;

	if (skb->protocol == htons(ETH_P_IP))
		err = prepare4(pkt, skb);
	else if (skb->protocol == htons(ETH_P_IPV6))
		err = prepare6(pkt, skb);

	*crc = rxe_icrc_hdr(pkt, skb);

	if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
		pkt->mask |= RXE_LOOPBACK_MASK;

	return err;
}

static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);

	rxe_drop_ref(qp);
}

int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;

	skb->destructor = rxe_skb_tx_dtor;
	skb->sk = pkt->qp->sk->sk;

	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	if (skb->protocol == htons(ETH_P_IP)) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", ntohs(skb->protocol));
		atomic_dec(&pkt->qp->skb_out);
		rxe_drop_ref(pkt->qp);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	return 0;
}

/* fix up a send packet to match the packets
 * received from UDP before looping them back
 */
void rxe_loopback(struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (skb->protocol == htons(ETH_P_IP))
		skb_pull(skb, sizeof(struct iphdr));
	else
		skb_pull(skb, sizeof(struct ipv6hdr));

	if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev)))
		kfree_skb(skb);
	else
		rxe_rcv(skb);
}

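/* rxe_init_packet() allocates the skb for an outgoing packet, reserving
 * headroom for the Ethernet, IP and UDP headers that the prepare*()
 * helpers and the lower layers push later, and exposes paylen zeroed
 * bytes of payload through pkt->hdr.
 */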
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb = NULL;
	struct net_device *ndev;
	const struct ib_gid_attr *attr;
	const int port_num = 1;

	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
	if (IS_ERR(attr))
		return NULL;

	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (IS_ERR(ndev)) {
		rcu_read_unlock();
		goto out;
	}
	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
			GFP_ATOMIC);

	if (unlikely(!skb)) {
		rcu_read_unlock();
		goto out;
	}

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));

	/* FIXME: hold reference to this netdev until life of this skb. */
	skb->dev = ndev;
	rcu_read_unlock();

	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = port_num;
	pkt->hdr = skb_put_zero(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

out:
	rdma_put_gid_attr(attr);
	return skb;
}

/*
 * this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}

int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = ib_alloc_device(rxe_dev, ib_dev);
	if (!rxe)
		return -ENOMEM;

	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu, ibdev_name);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return err;
	}

	return 0;
}

static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	dev_info(&rxe->ib_dev.dev, "set active\n");
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
	dev_info(&rxe->ib_dev.dev, "set down\n");
}

void rxe_set_port_state(struct rxe_dev *rxe)
{
	if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
		rxe_port_up(rxe);
	else
		rxe_port_down(rxe);
}

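/* rxe_notify() mirrors the state of the underlying Ethernet device into
 * the associated IB port: up/down and carrier changes update the port
 * state, MTU changes are propagated with rxe_set_mtu(), and
 * unregistration of the netdev tears down the IB device.
 */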
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);

	if (!rxe)
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rxe->ib_dev);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_CHANGE:
		rxe_set_port_state(rxe);
		break;
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}

	ib_device_put(&rxe->ib_dev);
	return NOTIFY_OK;
}

static struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};

static int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}

static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)

	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), true);
	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}

void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}

int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}
	return 0;
err_out:
	rxe_net_exit();
	return err;
}