// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);

static int wg_open(struct net_device *dev)
{
        struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
        struct inet6_dev *dev_v6 = __in6_dev_get(dev);
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
        int ret;

        if (dev_v4) {
                /* At some point we might put this check near the ip_rt_send_
                 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
                 * to the current secpath check.
                 */
                IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
                IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
        }
        if (dev_v6)
                dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

        mutex_lock(&wg->device_update_lock);
        ret = wg_socket_init(wg, wg->incoming_port);
        if (ret < 0)
                goto out;
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_send_staged_packets(peer);
                if (peer->persistent_keepalive_interval)
                        wg_packet_send_keepalive(peer);
        }
out:
        mutex_unlock(&wg->device_update_lock);
        return ret;
}
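
/* The notifier below wipes handshake state and session keys when the machine
 * is about to suspend or hibernate, so that key material does not outlive a
 * sleep; for hibernation this runs before the image is written out, which
 * presumably also keeps key material out of the on-disk image.
 */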

#ifdef CONFIG_PM_SLEEP
static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
                              void *data)
{
        struct wg_device *wg;
        struct wg_peer *peer;

        /* If the machine is constantly suspending and resuming, as part of
         * its normal operation rather than as a somewhat rare event, then we
         * don't actually want to clear keys.
         */
        if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
                return 0;

        if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
                return 0;

        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
                mutex_lock(&wg->device_update_lock);
                list_for_each_entry(peer, &wg->peer_list, peer_list) {
                        del_timer(&peer->timer_zero_key_material);
                        wg_noise_handshake_clear(&peer->handshake);
                        wg_noise_keypairs_clear(&peer->keypairs);
                }
                mutex_unlock(&wg->device_update_lock);
        }
        rtnl_unlock();
        rcu_barrier();
        return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
#endif

static int wg_stop(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
        struct sk_buff *skb;

        mutex_lock(&wg->device_update_lock);
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_purge_staged_packets(peer);
                wg_timers_stop(peer);
                wg_noise_handshake_clear(&peer->handshake);
                wg_noise_keypairs_clear(&peer->keypairs);
                wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
        }
        mutex_unlock(&wg->device_update_lock);
        while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
                kfree_skb(skb);
        atomic_set(&wg->handshake_queue_len, 0);
        wg_socket_reinit(wg, NULL, NULL);
        return 0;
}
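
/* Transmit path overview (a summary of wg_xmit() below, not new behavior):
 *  1. sanity-check that the skb really carries IPv4 or IPv6;
 *  2. look up the peer whose allowed-IPs set covers the destination
 *     address, via the allowedips longest-prefix-match trie;
 *  3. require that peer to have a usable endpoint address;
 *  4. software-segment GSO superpackets so each segment is encrypted
 *     individually;
 *  5. stage everything on the peer's queue, trimming the oldest entries
 *     if it overflows, then kick the async encryption machinery.
 * On failure, an ICMP(v6) unreachable message is sent back, much as a
 * router would do for an unroutable packet.
 */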

static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        struct sk_buff_head packets;
        struct wg_peer *peer;
        struct sk_buff *next;
        sa_family_t family;
        u32 mtu;
        int ret;

        if (unlikely(!wg_check_packet_protocol(skb))) {
                ret = -EPROTONOSUPPORT;
                net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
                goto err;
        }

        peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
        if (unlikely(!peer)) {
                ret = -ENOKEY;
                if (skb->protocol == htons(ETH_P_IP))
                        net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
                                            dev->name, &ip_hdr(skb)->daddr);
                else if (skb->protocol == htons(ETH_P_IPV6))
                        net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
                                            dev->name, &ipv6_hdr(skb)->daddr);
                goto err_icmp;
        }

        family = READ_ONCE(peer->endpoint.addr.sa_family);
        if (unlikely(family != AF_INET && family != AF_INET6)) {
                ret = -EDESTADDRREQ;
                net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
                                    dev->name, peer->internal_id);
                goto err_peer;
        }

        mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        __skb_queue_head_init(&packets);
        if (!skb_is_gso(skb)) {
                skb_mark_not_on_list(skb);
        } else {
                struct sk_buff *segs = skb_gso_segment(skb, 0);

                if (IS_ERR(segs)) {
                        ret = PTR_ERR(segs);
                        goto err_peer;
                }
                dev_kfree_skb(skb);
                skb = segs;
        }

        skb_list_walk_safe(skb, skb, next) {
                skb_mark_not_on_list(skb);

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        continue;

                /* We only need to keep the original dst around for icmp,
                 * so at this point we're in a position to drop it.
                 */
                skb_dst_drop(skb);

                PACKET_CB(skb)->mtu = mtu;

                __skb_queue_tail(&packets, skb);
        }

        spin_lock_bh(&peer->staged_packet_queue.lock);
        /* If the queue is getting too big, we start removing the oldest packets
         * until it's small again. We do this before adding the new packet, so
         * we don't remove GSO segments that are in excess.
         */
        while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
                dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
                ++dev->stats.tx_dropped;
        }
        skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);

        wg_packet_send_staged_packets(peer);

        wg_peer_put(peer);
        return NETDEV_TX_OK;

err_peer:
        wg_peer_put(peer);
err_icmp:
        if (skb->protocol == htons(ETH_P_IP))
                icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
        else if (skb->protocol == htons(ETH_P_IPV6))
                icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
        ++dev->stats.tx_errors;
        kfree_skb(skb);
        return ret;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open               = wg_open,
        .ndo_stop               = wg_stop,
        .ndo_start_xmit         = wg_xmit,
        .ndo_get_stats64        = dev_get_tstats64
};

static void wg_destruct(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);

        rtnl_lock();
        list_del(&wg->device_list);
        rtnl_unlock();
        mutex_lock(&wg->device_update_lock);
        rcu_assign_pointer(wg->creating_net, NULL);
        wg->incoming_port = 0;
        wg_socket_reinit(wg, NULL, NULL);
        /* The final references are cleared in the below calls to destroy_workqueue. */
        wg_peer_remove_all(wg);
        destroy_workqueue(wg->handshake_receive_wq);
        destroy_workqueue(wg->handshake_send_wq);
        destroy_workqueue(wg->packet_crypt_wq);
        wg_packet_queue_free(&wg->handshake_queue, true);
        wg_packet_queue_free(&wg->decrypt_queue, false);
        wg_packet_queue_free(&wg->encrypt_queue, false);
        rcu_barrier(); /* Wait for all the peers to be actually freed. */
        wg_ratelimiter_uninit();
        memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
        free_percpu(dev->tstats);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);

        pr_debug("%s: Interface destroyed\n", dev->name);
        free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };
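
/* Worked example of the overhead arithmetic in wg_setup() below, assuming
 * the usual constants from messages.h (MESSAGE_MINIMUM_LENGTH == 32: a
 * 16-byte data-message header plus a 16-byte authentication tag):
 *
 *   overhead = 32 + sizeof(struct udphdr) + sizeof(struct ipv6hdr)
 *            = 32 + 8 + 40 = 80 bytes
 *   dev->mtu = ETH_DATA_LEN - overhead = 1500 - 80 = 1420
 *
 * which is the familiar default WireGuard MTU of 1420.
 */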

static void wg_setup(struct net_device *dev)
{
        struct wg_device *wg = netdev_priv(dev);
        enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                                    NETIF_F_SG | NETIF_F_GSO |
                                    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
        const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
                             max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

        dev->netdev_ops = &netdev_ops;
        dev->header_ops = &ip_tunnel_header_ops;
        dev->hard_header_len = 0;
        dev->addr_len = 0;
        dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
        dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
        dev->type = ARPHRD_NONE;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->features |= NETIF_F_LLTX;
        dev->features |= WG_NETDEV_FEATURES;
        dev->hw_features |= WG_NETDEV_FEATURES;
        dev->hw_enc_features |= WG_NETDEV_FEATURES;
        dev->mtu = ETH_DATA_LEN - overhead;
        dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;

        SET_NETDEV_DEVTYPE(dev, &device_type);

        /* We need to keep the dst around in case of icmp replies. */
        netif_keep_dst(dev);

        memset(wg, 0, sizeof(*wg));
        wg->dev = dev;
}
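
/* Note on wg_newlink() below: resources are acquired in a fixed order and
 * the error labels at the bottom release them in exactly the reverse order,
 * each label falling through to the next. The handshake queue is freed
 * without purging on this path, since nothing can have been queued before
 * register_netdevice() succeeds.
 */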

static int wg_newlink(struct net *src_net, struct net_device *dev,
                      struct nlattr *tb[], struct nlattr *data[],
                      struct netlink_ext_ack *extack)
{
        struct wg_device *wg = netdev_priv(dev);
        int ret = -ENOMEM;

        rcu_assign_pointer(wg->creating_net, src_net);
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
        wg_allowedips_init(&wg->peer_allowedips);
        wg_cookie_checker_init(&wg->cookie_checker, wg);
        INIT_LIST_HEAD(&wg->peer_list);
        wg->device_update_gen = 1;

        wg->peer_hashtable = wg_pubkey_hashtable_alloc();
        if (!wg->peer_hashtable)
                return ret;

        wg->index_hashtable = wg_index_hashtable_alloc();
        if (!wg->index_hashtable)
                goto err_free_peer_hashtable;

        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                goto err_free_index_hashtable;

        wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
                        WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_receive_wq)
                goto err_free_tstats;

        wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
                        WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_send_wq)
                goto err_destroy_handshake_receive;

        wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
                        WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
        if (!wg->packet_crypt_wq)
                goto err_destroy_handshake_send;

        ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
                                   MAX_QUEUED_PACKETS);
        if (ret < 0)
                goto err_destroy_packet_crypt;

        ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
                                   MAX_QUEUED_PACKETS);
        if (ret < 0)
                goto err_free_encrypt_queue;

        ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
                                   MAX_QUEUED_INCOMING_HANDSHAKES);
        if (ret < 0)
                goto err_free_decrypt_queue;

        ret = wg_ratelimiter_init();
        if (ret < 0)
                goto err_free_handshake_queue;

        ret = register_netdevice(dev);
        if (ret < 0)
                goto err_uninit_ratelimiter;

        list_add(&wg->device_list, &device_list);

        /* We wait until the end to assign priv_destructor, so that
         * register_netdevice doesn't call it for us if it fails.
         */
        dev->priv_destructor = wg_destruct;

        pr_debug("%s: Interface created\n", dev->name);
        return ret;

err_uninit_ratelimiter:
        wg_ratelimiter_uninit();
err_free_handshake_queue:
        wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
        wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
        wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
        destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
        destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
        destroy_workqueue(wg->handshake_receive_wq);
err_free_tstats:
        free_percpu(dev->tstats);
err_free_index_hashtable:
        kvfree(wg->index_hashtable);
err_free_peer_hashtable:
        kvfree(wg->peer_hashtable);
        return ret;
}

static struct rtnl_link_ops link_ops __read_mostly = {
        .kind                   = KBUILD_MODNAME,
        .priv_size              = sizeof(struct wg_device),
        .setup                  = wg_setup,
        .newlink                = wg_newlink,
};

static void wg_netns_pre_exit(struct net *net)
{
        struct wg_device *wg;
        struct wg_peer *peer;

        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
                if (rcu_access_pointer(wg->creating_net) == net) {
                        pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
                        netif_carrier_off(wg->dev);
                        mutex_lock(&wg->device_update_lock);
                        rcu_assign_pointer(wg->creating_net, NULL);
                        wg_socket_reinit(wg, NULL, NULL);
                        list_for_each_entry(peer, &wg->peer_list, peer_list)
                                wg_socket_clear_peer_endpoint_src(peer);
                        mutex_unlock(&wg->device_update_lock);
                }
        }
        rtnl_unlock();
}

static struct pernet_operations pernet_ops = {
        .pre_exit = wg_netns_pre_exit
};

int __init wg_device_init(void)
{
        int ret;

#ifdef CONFIG_PM_SLEEP
        ret = register_pm_notifier(&pm_notifier);
        if (ret)
                return ret;
#endif

        ret = register_pernet_device(&pernet_ops);
        if (ret)
                goto error_pm;

        ret = rtnl_link_register(&link_ops);
        if (ret)
                goto error_pernet;

        return 0;

error_pernet:
        unregister_pernet_device(&pernet_ops);
error_pm:
#ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
#endif
        return ret;
}

void wg_device_uninit(void)
{
        rtnl_link_unregister(&link_ops);
        unregister_pernet_device(&pernet_ops);
#ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
#endif
        rcu_barrier();
}
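
/* For reference, a sketch of how this link type is typically exercised from
 * userspace ("wg0" is just an example name; the module is normally built as
 * wireguard.ko, making KBUILD_MODNAME, and hence link_ops.kind, "wireguard"):
 *
 *   ip link add wg0 type wireguard   # link_ops.newlink -> wg_newlink()
 *   ip link set wg0 up               # netdev_ops.ndo_open -> wg_open()
 *   ip link del wg0                  # unregister -> wg_destruct()
 */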