// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);

static int wg_open(struct net_device *dev)
{
	struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
	struct inet6_dev *dev_v6 = __in6_dev_get(dev);
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	int ret;

	if (dev_v4) {
		/* At some point we might put this check near the ip_rt_send_
		 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
		 * to the current secpath check.
		 */
		IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
		IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
	}
	if (dev_v6)
		dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

	ret = wg_socket_init(wg, wg->incoming_port);
	if (ret < 0)
		return ret;
	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_send_staged_packets(peer);
		if (peer->persistent_keepalive_interval)
			wg_packet_send_keepalive(peer);
	}
	mutex_unlock(&wg->device_update_lock);
	return 0;
}

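/* PM notifier: on suspend or hibernate, clear handshake state and session
 * keys for every peer so that key material does not linger in memory while
 * the machine is asleep. Systems that suspend constantly as part of normal
 * operation (autosleep, Android) are exempted, per the comment below.
 */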
#ifdef CONFIG_PM_SLEEP
static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	/* If the machine is constantly suspending and resuming, as part of
	 * its normal operation rather than as a somewhat rare event, then we
	 * don't actually want to clear keys.
	 */
	if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
		return 0;

	if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
		return 0;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		mutex_lock(&wg->device_update_lock);
		list_for_each_entry(peer, &wg->peer_list, peer_list) {
			del_timer(&peer->timer_zero_key_material);
			wg_noise_handshake_clear(&peer->handshake);
			wg_noise_keypairs_clear(&peer->keypairs);
		}
		mutex_unlock(&wg->device_update_lock);
	}
	rtnl_unlock();
	rcu_barrier();
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
#endif

static int wg_stop(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;

	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_purge_staged_packets(peer);
		wg_timers_stop(peer);
		wg_noise_handshake_clear(&peer->handshake);
		wg_noise_keypairs_clear(&peer->keypairs);
		wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	}
	mutex_unlock(&wg->device_update_lock);
	skb_queue_purge(&wg->incoming_handshakes);
	wg_socket_reinit(wg, NULL, NULL);
	return 0;
}

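/* Transmit path (ndo_start_xmit): verify that the payload really is an IP
 * packet, look up the peer whose allowed IPs cover the destination address,
 * segment GSO skbs, and splice everything onto that peer's staged packet
 * queue before kicking off encryption and transmission.
 */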
static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct sk_buff_head packets;
	struct wg_peer *peer;
	struct sk_buff *next;
	sa_family_t family;
	u32 mtu;
	int ret;

	if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
		ret = -EPROTONOSUPPORT;
		net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
		goto err;
	}

	peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
	if (unlikely(!peer)) {
		ret = -ENOKEY;
		if (skb->protocol == htons(ETH_P_IP))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
					    dev->name, &ip_hdr(skb)->daddr);
		else if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
					    dev->name, &ipv6_hdr(skb)->daddr);
		goto err;
	}

	family = READ_ONCE(peer->endpoint.addr.sa_family);
	if (unlikely(family != AF_INET && family != AF_INET6)) {
		ret = -EDESTADDRREQ;
		net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
				    dev->name, peer->internal_id);
		goto err_peer;
	}

	mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	__skb_queue_head_init(&packets);
	if (!skb_is_gso(skb)) {
		skb_mark_not_on_list(skb);
	} else {
		struct sk_buff *segs = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(segs))) {
			ret = PTR_ERR(segs);
			goto err_peer;
		}
		dev_kfree_skb(skb);
		skb = segs;
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			continue;

		/* We only need to keep the original dst around for icmp,
		 * so at this point we're in a position to drop it.
		 */
		skb_dst_drop(skb);

		PACKET_CB(skb)->mtu = mtu;

		__skb_queue_tail(&packets, skb);
	}

	spin_lock_bh(&peer->staged_packet_queue.lock);
	/* If the queue is getting too big, we start removing the oldest packets
	 * until it's small again. We do this before adding the new packet, so
	 * we don't remove GSO segments that are in excess.
	 */
	while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
		dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
		++dev->stats.tx_dropped;
	}
	skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	wg_packet_send_staged_packets(peer);

	wg_peer_put(peer);
	return NETDEV_TX_OK;

err_peer:
	wg_peer_put(peer);
err:
	++dev->stats.tx_errors;
	if (skb->protocol == htons(ETH_P_IP))
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
	kfree_skb(skb);
	return ret;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= wg_open,
	.ndo_stop		= wg_stop,
	.ndo_start_xmit		= wg_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64
};

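/* Called as the netdev priv_destructor: tear down the socket, remove all
 * peers, destroy the workqueues and packet queues, and free the hashtables
 * and per-cpu data allocated in wg_newlink(), zeroing the static identity
 * on the way out.
 */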
static void wg_destruct(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);

	rtnl_lock();
	list_del(&wg->device_list);
	rtnl_unlock();
	mutex_lock(&wg->device_update_lock);
	wg->incoming_port = 0;
	wg_socket_reinit(wg, NULL, NULL);
	/* The final references are cleared in the below calls to destroy_workqueue. */
	wg_peer_remove_all(wg);
	destroy_workqueue(wg->handshake_receive_wq);
	destroy_workqueue(wg->handshake_send_wq);
	destroy_workqueue(wg->packet_crypt_wq);
	wg_packet_queue_free(&wg->decrypt_queue, true);
	wg_packet_queue_free(&wg->encrypt_queue, true);
	rcu_barrier(); /* Wait for all the peers to be actually freed. */
	wg_ratelimiter_uninit();
	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
	skb_queue_purge(&wg->incoming_handshakes);
	free_percpu(dev->tstats);
	free_percpu(wg->incoming_handshakes_worker);
	if (wg->have_creating_net_ref)
		put_net(wg->creating_net);
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
	mutex_unlock(&wg->device_update_lock);

	pr_debug("%s: Interface deleted\n", dev->name);
	free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };

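/* Configure the net_device as a point-to-point, ARP-less layer 3 tunnel:
 * no link-layer header or address, software GSO and checksum offloads, and
 * a default MTU that leaves room for the WireGuard message overhead plus
 * the outer UDP and IP headers.
 */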
static void wg_setup(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				    NETIF_F_SG | NETIF_F_GSO |
				    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };

	dev->netdev_ops = &netdev_ops;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
	dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features |= NETIF_F_LLTX;
	dev->features |= WG_NETDEV_FEATURES;
	dev->hw_features |= WG_NETDEV_FEATURES;
	dev->hw_enc_features |= WG_NETDEV_FEATURES;
	dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
		   sizeof(struct udphdr) -
		   max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

	SET_NETDEV_DEVTYPE(dev, &device_type);

	/* We need to keep the dst around in case of icmp replies. */
	netif_keep_dst(dev);

	memset(wg, 0, sizeof(*wg));
	wg->dev = dev;
}

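/* .newlink handler for the rtnl_link_ops below: allocate the hashtables,
 * per-cpu handshake workers, workqueues and crypt queues, initialize the
 * ratelimiter, and finally register the netdevice, unwinding everything in
 * reverse order via the error labels if any step fails.
 */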
static int wg_newlink(struct net *src_net, struct net_device *dev,
		      struct nlattr *tb[], struct nlattr *data[],
		      struct netlink_ext_ack *extack)
{
	struct wg_device *wg = netdev_priv(dev);
	int ret = -ENOMEM;

	wg->creating_net = src_net;
	init_rwsem(&wg->static_identity.lock);
	mutex_init(&wg->socket_update_lock);
	mutex_init(&wg->device_update_lock);
	skb_queue_head_init(&wg->incoming_handshakes);
	wg_allowedips_init(&wg->peer_allowedips);
	wg_cookie_checker_init(&wg->cookie_checker, wg);
	INIT_LIST_HEAD(&wg->peer_list);
	wg->device_update_gen = 1;

	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return ret;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable)
		goto err_free_peer_hashtable;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		goto err_free_index_hashtable;

	wg->incoming_handshakes_worker =
		wg_packet_percpu_multicore_worker_alloc(
				wg_packet_handshake_receive_worker, wg);
	if (!wg->incoming_handshakes_worker)
		goto err_free_tstats;

	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_receive_wq)
		goto err_free_incoming_handshakes;

	wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
			WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_send_wq)
		goto err_destroy_handshake_receive;

	wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
	if (!wg->packet_crypt_wq)
		goto err_destroy_handshake_send;

	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				   true, MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_destroy_packet_crypt;

	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
				   true, MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_free_encrypt_queue;

	ret = wg_ratelimiter_init();
	if (ret < 0)
		goto err_free_decrypt_queue;

	ret = register_netdevice(dev);
	if (ret < 0)
		goto err_uninit_ratelimiter;

	list_add(&wg->device_list, &device_list);

	/* We wait until the end to assign priv_destructor, so that
	 * register_netdevice doesn't call it for us if it fails.
	 */
	dev->priv_destructor = wg_destruct;

	pr_debug("%s: Interface created\n", dev->name);
	return ret;

err_uninit_ratelimiter:
	wg_ratelimiter_uninit();
err_free_decrypt_queue:
	wg_packet_queue_free(&wg->decrypt_queue, true);
err_free_encrypt_queue:
	wg_packet_queue_free(&wg->encrypt_queue, true);
err_destroy_packet_crypt:
	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
	destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
	destroy_workqueue(wg->handshake_receive_wq);
err_free_incoming_handshakes:
	free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
	free_percpu(dev->tstats);
err_free_index_hashtable:
	kvfree(wg->index_hashtable);
err_free_peer_hashtable:
	kvfree(wg->peer_hashtable);
	return ret;
}

static struct rtnl_link_ops link_ops __read_mostly = {
	.kind			= KBUILD_MODNAME,
	.priv_size		= sizeof(struct wg_device),
	.setup			= wg_setup,
	.newlink		= wg_newlink,
};

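/* Runs on NETDEV_REGISTER, which also fires when an interface is moved to a
 * different network namespace. While the device lives outside the namespace
 * it was created in (which it keeps using for its transit socket), hold a
 * reference on that creating namespace; drop it again if the device moves
 * back home.
 */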
static int wg_netdevice_notification(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
	struct wg_device *wg = netdev_priv(dev);

	ASSERT_RTNL();

	if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
		return 0;

	if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
		put_net(wg->creating_net);
		wg->have_creating_net_ref = false;
	} else if (dev_net(dev) != wg->creating_net &&
		   !wg->have_creating_net_ref) {
		wg->have_creating_net_ref = true;
		get_net(wg->creating_net);
	}
	return 0;
}

static struct notifier_block netdevice_notifier = {
	.notifier_call = wg_netdevice_notification
};

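/* Module entry points: register the PM and netdevice notifiers and the rtnl
 * link kind on load, and unregister them in reverse order on unload, with a
 * final rcu_barrier() to let outstanding RCU callbacks finish.
 */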
int __init wg_device_init(void)
{
	int ret;

#ifdef CONFIG_PM_SLEEP
	ret = register_pm_notifier(&pm_notifier);
	if (ret)
		return ret;
#endif

	ret = register_netdevice_notifier(&netdevice_notifier);
	if (ret)
		goto error_pm;

	ret = rtnl_link_register(&link_ops);
	if (ret)
		goto error_netdevice;

	return 0;

error_netdevice:
	unregister_netdevice_notifier(&netdevice_notifier);
error_pm:
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	return ret;
}

void wg_device_uninit(void)
{
	rtnl_link_unregister(&link_ops);
	unregister_netdevice_notifier(&netdevice_notifier);
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	rcu_barrier();
}