#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
#
# This script tests the below topology:
#
# ┌─────────────────────┐   ┌──────────────────────────────────┐   ┌─────────────────────┐
# │   $ns1 namespace    │   │          $ns0 namespace          │   │   $ns2 namespace    │
# │                     │   │                                  │   │                     │
# │┌────────┐           │   │            ┌────────┐            │   │           ┌────────┐│
# ││  wg0   │───────────┼───┼────────────│   lo   │────────────┼───┼───────────│  wg0   ││
# │├────────┴──────────┐│   │    ┌───────┴────────┴────────┐   │   │┌──────────┴────────┤│
# ││192.168.241.1/24   ││   │    │(ns1)         (ns2)      │   │   ││192.168.241.2/24   ││
# ││fd00::1/24         ││   │    │127.0.0.1:1   127.0.0.1:2│   │   ││fd00::2/24         ││
# │└───────────────────┘│   │    │[::]:1        [::]:2     │   │   │└───────────────────┘│
# └─────────────────────┘   │    └─────────────────────────┘   │   └─────────────────────┘
#                           └──────────────────────────────────┘
#
# After the topology is prepared we run a series of TCP/UDP iperf3 tests between the
# wireguard peers in $ns1 and $ns2. Note that $ns0 is the endpoint for the wg0
# interfaces in $ns1 and $ns2. See https://www.wireguard.com/netns/ for further
# details on how this is accomplished.
set -e

exec 3>&1
export LANG=C
export WG_HIDE_KEYS=never
netns0="wg-test-$$-0"
netns1="wg-test-$$-1"
netns2="wg-test-$$-2"
pretty() { echo -e "\x1b[32m\x1b[1m[+] ${1:+NS$1: }${2}\x1b[0m" >&3; }
pp() { pretty "" "$*"; "$@"; }
maybe_exec() { if [[ $BASHPID -eq $$ ]]; then "$@"; else exec "$@"; fi; }
n0() { pretty 0 "$*"; maybe_exec ip netns exec $netns0 "$@"; }
n1() { pretty 1 "$*"; maybe_exec ip netns exec $netns1 "$@"; }
n2() { pretty 2 "$*"; maybe_exec ip netns exec $netns2 "$@"; }
ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; }
ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; }
ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; }
sleep() { read -t "$1" -N 1 || true; }
waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; }
waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; }

cleanup() {
	set +e
	exec 2>/dev/null
	printf "$orig_message_cost" > /proc/sys/net/core/message_cost
	ip0 link del dev wg0
	ip0 link del dev wg1
	ip1 link del dev wg0
	ip1 link del dev wg1
	ip2 link del dev wg0
	ip2 link del dev wg1
	local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)"
	[[ -n $to_kill ]] && kill $to_kill
	pp ip netns del $netns1
	pp ip netns del $netns2
	pp ip netns del $netns0
	exit
}

orig_message_cost="$(< /proc/sys/net/core/message_cost)"
trap cleanup EXIT
printf 0 > /proc/sys/net/core/message_cost

ip netns del $netns0 2>/dev/null || true
ip netns del $netns1 2>/dev/null || true
ip netns del $netns2 2>/dev/null || true
pp ip netns add $netns0
pp ip netns add $netns1
pp ip netns add $netns2
ip0 link set up dev lo

ip0 link add dev wg0 type wireguard
ip0 link set wg0 netns $netns1
ip0 link add dev wg0 type wireguard
ip0 link set wg0 netns $netns2
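
# Note on the setup above: both wg0 devices are created in $ns0 and only then
# moved into $ns1 and $ns2. A WireGuard device keeps sending and receiving its
# encrypted UDP traffic in the namespace it was created in, so the transport
# continues to flow over $ns0's loopback even though the interfaces now live
# elsewhere; this is the technique described at https://www.wireguard.com/netns/
# and is why the endpoints used below are simply 127.0.0.1/[::1] ports.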
key1="$(pp wg genkey)"
key2="$(pp wg genkey)"
key3="$(pp wg genkey)"
key4="$(pp wg genkey)"
pub1="$(pp wg pubkey <<<"$key1")"
pub2="$(pp wg pubkey <<<"$key2")"
pub3="$(pp wg pubkey <<<"$key3")"
pub4="$(pp wg pubkey <<<"$key4")"
psk="$(pp wg genpsk)"
[[ -n $key1 && -n $key2 && -n $psk ]]

configure_peers() {
	ip1 addr add 192.168.241.1/24 dev wg0
	ip1 addr add fd00::1/112 dev wg0

	ip2 addr add 192.168.241.2/24 dev wg0
	ip2 addr add fd00::2/112 dev wg0

	n1 wg set wg0 \
		private-key <(echo "$key1") \
		listen-port 1 \
		peer "$pub2" \
			preshared-key <(echo "$psk") \
			allowed-ips 192.168.241.2/32,fd00::2/128
	n2 wg set wg0 \
		private-key <(echo "$key2") \
		listen-port 2 \
		peer "$pub1" \
			preshared-key <(echo "$psk") \
			allowed-ips 192.168.241.1/32,fd00::1/128

	ip1 link set up dev wg0
	ip2 link set up dev wg0
}
configure_peers

tests() {
	# Ping over IPv4
	n2 ping -c 10 -f -W 1 192.168.241.1
	n1 ping -c 10 -f -W 1 192.168.241.2

	# Ping over IPv6
	n2 ping6 -c 10 -f -W 1 fd00::1
	n1 ping6 -c 10 -f -W 1 fd00::2

	# TCP over IPv4
	n2 iperf3 -s -1 -B 192.168.241.2 &
	waitiperf $netns2 $!
	n1 iperf3 -Z -t 3 -c 192.168.241.2

	# TCP over IPv6
	n1 iperf3 -s -1 -B fd00::1 &
	waitiperf $netns1 $!
	n2 iperf3 -Z -t 3 -c fd00::1

	# UDP over IPv4
	n1 iperf3 -s -1 -B 192.168.241.1 &
	waitiperf $netns1 $!
	n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1

	# UDP over IPv6
	n2 iperf3 -s -1 -B fd00::2 &
	waitiperf $netns2 $!
	n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2

	# TCP over IPv4, in parallel
	for max in 4 5 50; do
		local pids=( )
		for ((i=0; i < max; ++i)) do
			n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 &
			pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i ))
		done
		for ((i=0; i < max; ++i)) do
			n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 &
		done
		wait "${pids[@]}"
	done
}

[[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}"
big_mtu=$(( 34816 - 1500 + $orig_mtu ))

# Test using IPv4 as outer transport
n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2
n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1
# Before calling tests, we first make sure that the stats counters and timestamper are working
n2 ping -c 10 -f -W 1 192.168.241.1
{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip2 -stats link show dev wg0)
(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) ))
{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip1 -stats link show dev wg0)
(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) ))
read _ rx_bytes tx_bytes < <(n2 wg show wg0 transfer)
(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) ))
read _ rx_bytes tx_bytes < <(n1 wg show wg0 transfer)
(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) ))
read _ timestamp < <(n1 wg show wg0 latest-handshakes)
(( timestamp != 0 ))

tests
ip1 link set wg0 mtu $big_mtu
ip2 link set wg0 mtu $big_mtu
tests

ip1 link set wg0 mtu $orig_mtu
ip2 link set wg0 mtu $orig_mtu

# Test using IPv6 as outer transport
n1 wg set wg0 peer "$pub2" endpoint [::1]:2
n2 wg set wg0 peer "$pub1" endpoint [::1]:1
tests
ip1 link set wg0 mtu $big_mtu
ip2 link set wg0 mtu $big_mtu
tests
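
# A note on the next block: WireGuard pads each plaintext packet to a multiple
# of 16 bytes, but never beyond the MTU that applies to the packet's route.
# With the 1299-byte route MTU below, the 1297-byte ping (1269 bytes of payload
# plus ICMP and IPv4 headers) should presumably leave $ns1 as a 1359-byte outer
# IPv4 packet; padding to the 1300-byte device MTU instead would produce
# exactly 1360 bytes, which is what the iptables length match in $ns0 is there
# to catch.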
# Test that route MTUs work with the padding
ip1 link set wg0 mtu 1300
ip2 link set wg0 mtu 1300
n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2
n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1
n0 iptables -A INPUT -m length --length 1360 -j DROP
n1 ip route add 192.168.241.2/32 dev wg0 mtu 1299
n2 ip route add 192.168.241.1/32 dev wg0 mtu 1299
n2 ping -c 1 -W 1 -s 1269 192.168.241.1
n2 ip route delete 192.168.241.1/32 dev wg0 mtu 1299
n1 ip route delete 192.168.241.2/32 dev wg0 mtu 1299
n0 iptables -F INPUT

ip1 link set wg0 mtu $orig_mtu
ip2 link set wg0 mtu $orig_mtu

# Test using IPv4 that roaming works
ip0 -4 addr del 127.0.0.1/8 dev lo
ip0 -4 addr add 127.212.121.99/8 dev lo
n1 wg set wg0 listen-port 9999
n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2
n1 ping6 -W 1 -c 1 fd00::2
[[ $(n2 wg show wg0 endpoints) == "$pub1	127.212.121.99:9999" ]]

# Test using IPv6 that roaming works
n1 wg set wg0 listen-port 9998
n1 wg set wg0 peer "$pub2" endpoint [::1]:2
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1	[::1]:9998" ]]

# Test that crypto-RP filter works
n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24
exec 4< <(n1 ncat -l -u -p 1111)
ncat_pid=$!
waitncatudp $netns1 $ncat_pid
n2 ncat -u 192.168.241.1 1111 <<<"X"
read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]]
kill $ncat_pid
more_specific_key="$(pp wg genkey | pp wg pubkey)"
n1 wg set wg0 peer "$more_specific_key" allowed-ips 192.168.241.2/32
n2 wg set wg0 listen-port 9997
exec 4< <(n1 ncat -l -u -p 1111)
ncat_pid=$!
waitncatudp $netns1 $ncat_pid
n2 ncat -u 192.168.241.1 1111 <<<"X"
! read -r -N 1 -t 1 out <&4 || false
kill $ncat_pid
n1 wg set wg0 peer "$more_specific_key" remove
[[ $(n1 wg show wg0 endpoints) == "$pub2	[::1]:9997" ]]

# Test that we can change private keys and immediately handshake
n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips 192.168.241.2/32 endpoint 127.0.0.1:2
n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32
n1 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 private-key <(echo "$key3")
n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove
n1 ping -W 1 -c 1 192.168.241.2
n2 wg set wg0 peer "$pub3" remove

# Test that we can route wg through wg
ip1 addr flush dev wg0
ip2 addr flush dev wg0
ip1 addr add fd00::5:1/112 dev wg0
ip2 addr add fd00::5:2/112 dev wg0
n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2
n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998
ip1 link add wg1 type wireguard
ip2 link add wg1 type wireguard
ip1 addr add 192.168.241.1/24 dev wg1
ip1 addr add fd00::1/112 dev wg1
ip2 addr add 192.168.241.2/24 dev wg1
ip2 addr add fd00::2/112 dev wg1
ip1 link set mtu 1340 up dev wg1
ip2 link set mtu 1340 up dev wg1
n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5
n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5
tests
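# The next block deliberately wires the nested tunnels into a loop: the outer
# tunnel's endpoint is pointed back at an address carried by the inner tunnel,
# and the far side's interfaces are then deleted. The tx_bytes delta check at
# the end asserts that wg1 transmits less than ~70 kB during the failed flood
# ping, i.e. packets that would circle through the nested tunnels are dropped
# rather than amplified.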
# Try to set up a routing loop between the two namespaces
ip1 link set netns $netns0 dev wg1
ip0 addr add 192.168.241.1/24 dev wg1
ip0 link set up dev wg1
n0 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
ip2 link del wg0
ip2 link del wg1
read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
sleep 1
read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
(( tx_bytes_after - tx_bytes_before < 70000 ))

ip0 link del wg1
ip1 link del wg0

# Test using NAT. We now change the topology to this:
# ┌────────────────────────────────────────┐    ┌────────────────────────────────────────────────┐     ┌────────────────────────────────────────┐
# │             $ns1 namespace             │    │                 $ns0 namespace                 │     │             $ns2 namespace             │
# │                                        │    │                                                │     │                                        │
# │  ┌─────┐             ┌─────┐           │    │    ┌──────┐              ┌──────┐              │     │  ┌─────┐            ┌─────┐            │
# │  │ wg0 │─────────────│vethc│───────────┼────┼────│vethrc│              │vethrs│──────────────┼─────┼──│veths│────────────│ wg0 │            │
# │  ├─────┴──────────┐  ├─────┴──────────┐│    │    ├──────┴─────────┐    ├──────┴────────────┐ │     │  ├─────┴──────────┐ ├─────┴──────────┐ │
# │  │192.168.241.1/24│  │192.168.1.100/24││    │    │192.168.1.1/24  │    │10.0.0.1/24        │ │     │  │10.0.0.100/24   │ │192.168.241.2/24│ │
# │  │fd00::1/24      │  │                ││    │    │                │    │SNAT:192.168.1.0/24│ │     │  │                │ │fd00::2/24      │ │
# │  └────────────────┘  └────────────────┘│    │    └────────────────┘    └───────────────────┘ │     │  └────────────────┘ └────────────────┘ │
# └────────────────────────────────────────┘    └────────────────────────────────────────────────┘     └────────────────────────────────────────┘

ip1 link add dev wg0 type wireguard
ip2 link add dev wg0 type wireguard
configure_peers

ip0 link add vethrc type veth peer name vethc
ip0 link add vethrs type veth peer name veths
ip0 link set vethc netns $netns1
ip0 link set veths netns $netns2
ip0 link set vethrc up
ip0 link set vethrs up
ip0 addr add 192.168.1.1/24 dev vethrc
ip0 addr add 10.0.0.1/24 dev vethrs
ip1 addr add 192.168.1.100/24 dev vethc
ip1 link set vethc up
ip1 route add default via 192.168.1.1
ip2 addr add 10.0.0.100/24 dev veths
ip2 link set veths up
waitiface $netns0 vethrc
waitiface $netns0 vethrs
waitiface $netns1 vethc
waitiface $netns2 veths

n0 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward'
n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout'
n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream'
n0 iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -d 10.0.0.0/24 -j SNAT --to 10.0.0.1

n1 wg set wg0 peer "$pub2" endpoint 10.0.0.100:2 persistent-keepalive 1
n1 ping -W 1 -c 1 192.168.241.2
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.1:1" ]]
# Demonstrate n2 can still send packets to n1, since persistent-keepalive will prevent connection tracking entry from expiring (to see entries: `n0 conntrack -L`).
pp sleep 3
n2 ping -W 1 -c 1 192.168.241.1
n1 wg set wg0 peer "$pub2" persistent-keepalive 0
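
# The next few checks exercise socket binding and rerouting: `ping -I wg0`
# binds the socket to the device (sk_bound_dev_if), and the mangle rule then
# applies an fwmark so that already-routed packets have to be rerouted once the
# mark changes; in both cases the traffic should still end up in the tunnel.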
# Test that sk_bound_dev_if works
n1 ping -I wg0 -c 1 -W 1 192.168.241.2
# What about when the mark changes and the packet must be rerouted?
n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1
n1 ping -c 1 -W 1 192.168.241.2 # First the boring case
n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case
n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1

# Test that onion routing works, even when it loops
n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
ip1 addr add 192.168.242.1/24 dev wg0
ip2 link add wg1 type wireguard
ip2 addr add 192.168.242.2/24 dev wg1
n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32
ip2 link set wg1 up
n1 ping -W 1 -c 1 192.168.242.2
ip2 link del wg1
n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5
! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel
n1 wg set wg0 peer "$pub3" remove
ip1 addr del 192.168.242.1/24 dev wg0

# Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs.
ip1 -6 addr add fc00::9/96 dev vethc
ip1 -6 route add default via fc00::1
ip2 -4 addr add 192.168.99.7/32 dev wg0
ip2 -6 addr add abab::1111/128 dev wg0
n1 wg set wg0 fwmark 51820 peer "$pub2" allowed-ips 192.168.99.7,abab::1111
ip1 -6 route add default dev wg0 table 51820
ip1 -6 rule add not fwmark 51820 table 51820
ip1 -6 rule add table main suppress_prefixlength 0
ip1 -4 route add default dev wg0 table 51820
ip1 -4 rule add not fwmark 51820 table 51820
ip1 -4 rule add table main suppress_prefixlength 0
n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
# Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
n1 ping -W 1 -c 100 -f 192.168.99.7
n1 ping -W 1 -c 100 -f abab::1111
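
# For context, the route/rule statements above mirror wg-quick(8)'s policy
# routing: wg0's own encrypted packets carry fwmark 51820 and therefore skip
# table 51820, while all other traffic hits that table's default route into
# wg0; the "suppress_prefixlength 0" rule lets more-specific routes in the main
# table (but not its default route) still take precedence.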
# Have ns2 NAT into wg0 packets from ns0, but return an icmp error along the right route.
n2 iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -d 192.168.241.0/24 -j SNAT --to 192.168.241.2
n0 iptables -t filter -A INPUT \! -s 10.0.0.0/24 -i vethrs -j DROP # Manual rpfilter just to be explicit.
n2 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward'
ip0 -4 route add 192.168.241.1 via 10.0.0.100
n2 wg set wg0 peer "$pub1" remove
[[ $(! n0 ping -W 1 -c 1 192.168.241.1 || false) == *"From 10.0.0.100 icmp_seq=1 Destination Host Unreachable"* ]]

n0 iptables -t nat -F
n0 iptables -t filter -F
n2 iptables -t nat -F
ip0 link del vethrc
ip0 link del vethrs
ip1 link del wg0
ip2 link del wg0

# Test that saddr routing is sticky but not too sticky, changing to this topology:
# ┌────────────────────────────────────────┐    ┌────────────────────────────────────────┐
# │             $ns1 namespace             │    │             $ns2 namespace             │
# │                                        │    │                                        │
# │  ┌─────┐             ┌─────┐           │    │  ┌─────┐            ┌─────┐            │
# │  │ wg0 │─────────────│veth1│───────────┼────┼──│veth2│────────────│ wg0 │            │
# │  ├─────┴──────────┐  ├─────┴──────────┐│    │  ├─────┴──────────┐ ├─────┴──────────┐ │
# │  │192.168.241.1/24│  │10.0.0.1/24     ││    │  │10.0.0.2/24     │ │192.168.241.2/24│ │
# │  │fd00::1/24      │  │fd00:aa::1/96   ││    │  │fd00:aa::2/96   │ │fd00::2/24      │ │
# │  └────────────────┘  └────────────────┘│    │  └────────────────┘ └────────────────┘ │
# └────────────────────────────────────────┘    └────────────────────────────────────────┘

ip1 link add dev wg0 type wireguard
ip2 link add dev wg0 type wireguard
configure_peers
ip1 link add veth1 type veth peer name veth2
ip1 link set veth2 netns $netns2
n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad'
n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad'
n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth1/accept_dad'
n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth2/accept_dad'
n1 bash -c 'printf 1 > /proc/sys/net/ipv4/conf/veth1/promote_secondaries'

# First we check that we aren't overly sticky and can fail over to new IPs when old ones are removed
ip1 addr add 10.0.0.1/24 dev veth1
ip1 addr add fd00:aa::1/96 dev veth1
ip2 addr add 10.0.0.2/24 dev veth2
ip2 addr add fd00:aa::2/96 dev veth2
ip1 link set veth1 up
ip2 link set veth2 up
waitiface $netns1 veth1
waitiface $netns2 veth2
n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2
n1 ping -W 1 -c 1 192.168.241.2
ip1 addr add 10.0.0.10/24 dev veth1
ip1 addr del 10.0.0.1/24 dev veth1
n1 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
n1 ping -W 1 -c 1 192.168.241.2
ip1 addr add fd00:aa::10/96 dev veth1
ip1 addr del fd00:aa::1/96 dev veth1
n1 ping -W 1 -c 1 192.168.241.2

# Now we show that we can successfully do reply-to-sender routing
ip1 link set veth1 down
ip2 link set veth2 down
ip1 addr flush dev veth1
ip2 addr flush dev veth2
ip1 addr add 10.0.0.1/24 dev veth1
ip1 addr add 10.0.0.2/24 dev veth1
ip1 addr add fd00:aa::1/96 dev veth1
ip1 addr add fd00:aa::2/96 dev veth1
ip2 addr add 10.0.0.3/24 dev veth2
ip2 addr add fd00:aa::3/96 dev veth2
ip1 link set veth1 up
ip2 link set veth2 up
waitiface $netns1 veth1
waitiface $netns2 veth2
n2 wg set wg0 peer "$pub1" endpoint 10.0.0.1:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.1:1" ]]
n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	[fd00:aa::1]:1" ]]
n2 wg set wg0 peer "$pub1" endpoint 10.0.0.2:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.2:1" ]]
n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::2]:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	[fd00:aa::2]:1" ]]
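
# In each of the four cases above, the endpoint recorded by $ns2 must not roam:
# veth1 in $ns1 carries several addresses, and n1 is expected to source its
# reply from whichever address n2 actually sent to, so n2's view of the
# endpoint stays exactly what was configured.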
# What happens if the inbound destination address belongs to a different interface than the default route?
ip1 link add dummy0 type dummy
ip1 addr add 10.50.0.1/24 dev dummy0
ip1 link set dummy0 up
ip2 route add 10.50.0.0/24 dev veth2
n2 wg set wg0 peer "$pub1" endpoint 10.50.0.1:1
n2 ping -W 1 -c 1 192.168.241.1
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.50.0.1:1" ]]

ip1 link del dummy0
ip1 addr flush dev veth1
ip2 addr flush dev veth2
ip1 route flush dev veth1
ip2 route flush dev veth2

# Now we see what happens if another interface route takes precedence over an ongoing one
ip1 link add veth3 type veth peer name veth4
ip1 link set veth4 netns $netns2
ip1 addr add 10.0.0.1/24 dev veth1
ip2 addr add 10.0.0.2/24 dev veth2
ip1 addr add 10.0.0.3/24 dev veth3
ip1 link set veth1 up
ip2 link set veth2 up
ip1 link set veth3 up
ip2 link set veth4 up
waitiface $netns1 veth1
waitiface $netns2 veth2
waitiface $netns1 veth3
waitiface $netns2 veth4
ip1 route flush dev veth1
ip1 route flush dev veth3
ip1 route add 10.0.0.0/24 dev veth1 src 10.0.0.1 metric 2
n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.1:1" ]]
ip1 route add 10.0.0.0/24 dev veth3 src 10.0.0.3 metric 1
n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth1/rp_filter'
n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth4/rp_filter'
n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.3:1" ]]

ip1 link del veth1
ip1 link del veth3
ip1 link del wg0
ip2 link del wg0
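
# The next several checks poke at the configuration interface rather than the
# data path. Configurations with a very large number of allowed IPs or peers do
# not fit in a single netlink message, so the dumps behind `wg show` and
# `wg showconf` have to be split across messages; the element counts below
# verify that nothing is lost or duplicated at the message boundaries.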
# We test that Netlink/IPC is working properly by doing things that usually cause split responses
ip0 link add dev wg0 type wireguard
config=( "[Interface]" "PrivateKey=$(wg genkey)" "[Peer]" "PublicKey=$(wg genkey)" )
for a in {1..255}; do
	for b in {0..255}; do
		config+=( "AllowedIPs=$a.$b.0.0/16,$a::$b/128" )
	done
done
n0 wg setconf wg0 <(printf '%s\n' "${config[@]}")
i=0
for ip in $(n0 wg show wg0 allowed-ips); do
	((++i))
done
((i == 255*256*2+1))
ip0 link del wg0
ip0 link add dev wg0 type wireguard
config=( "[Interface]" "PrivateKey=$(wg genkey)" )
for a in {1..40}; do
	config+=( "[Peer]" "PublicKey=$(wg genkey)" )
	for b in {1..52}; do
		config+=( "AllowedIPs=$a.$b.0.0/16" )
	done
done
n0 wg setconf wg0 <(printf '%s\n' "${config[@]}")
i=0
while read -r line; do
	j=0
	for ip in $line; do
		((++j))
	done
	((j == 53))
	((++i))
done < <(n0 wg show wg0 allowed-ips)
((i == 40))
ip0 link del wg0
ip0 link add wg0 type wireguard
config=( )
for i in {1..29}; do
	config+=( "[Peer]" "PublicKey=$(wg genkey)" )
done
config+=( "[Peer]" "PublicKey=$(wg genkey)" "AllowedIPs=255.2.3.4/32,abcd::255/128" )
n0 wg setconf wg0 <(printf '%s\n' "${config[@]}")
n0 wg showconf wg0 > /dev/null
ip0 link del wg0

allowedips=( )
for i in {1..197}; do
	allowedips+=( abcd::$i )
done
saved_ifs="$IFS"
IFS=,
allowedips="${allowedips[*]}"
IFS="$saved_ifs"
ip0 link add wg0 type wireguard
n0 wg set wg0 peer "$pub1"
n0 wg set wg0 peer "$pub2" allowed-ips "$allowedips"
{
	read -r pub allowedips
	[[ $pub == "$pub1" && $allowedips == "(none)" ]]
	read -r pub allowedips
	[[ $pub == "$pub2" ]]
	i=0
	for _ in $allowedips; do
		((++i))
	done
	((i == 197))
} < <(n0 wg show wg0 allowed-ips)
ip0 link del wg0

! n0 wg show doesnotexist || false

ip0 link add wg0 type wireguard
n0 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk")
[[ $(n0 wg show wg0 private-key) == "$key1" ]]
[[ $(n0 wg show wg0 preshared-keys) == "$pub2	$psk" ]]
n0 wg set wg0 private-key /dev/null peer "$pub2" preshared-key /dev/null
[[ $(n0 wg show wg0 private-key) == "(none)" ]]
[[ $(n0 wg show wg0 preshared-keys) == "$pub2	(none)" ]]
n0 wg set wg0 peer "$pub2"
n0 wg set wg0 private-key <(echo "$key2")
[[ $(n0 wg show wg0 public-key) == "$pub2" ]]
[[ -z $(n0 wg show wg0 peers) ]]
n0 wg set wg0 peer "$pub2"
[[ -z $(n0 wg show wg0 peers) ]]
n0 wg set wg0 private-key <(echo "$key1")
n0 wg set wg0 peer "$pub2"
[[ $(n0 wg show wg0 peers) == "$pub2" ]]
n0 wg set wg0 private-key <(echo "/${key1:1}")
[[ $(n0 wg show wg0 private-key) == "+${key1:1}" ]]
n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0,10.0.0.0/8,100.0.0.0/10,172.16.0.0/12,192.168.0.0/16
n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0
n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75
n0 wg set wg0 peer "$pub2" allowed-ips ::/0
n0 wg set wg0 peer "$pub2" remove
for low_order_point in AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do
	n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111
done
[[ -n $(n0 wg show wg0 peers) ]]
exec 4< <(n0 ncat -l -u -p 1111)
ncat_pid=$!
waitncatudp $netns0 $ncat_pid
ip0 link set wg0 up
! read -r -n 1 -t 2 <&4 || false
kill $ncat_pid
ip0 link del wg0

# Ensure that dst_cache references don't outlive netns lifetime
ip1 link add dev wg0 type wireguard
ip2 link add dev wg0 type wireguard
configure_peers
ip1 link add veth1 type veth peer name veth2
ip1 link set veth2 netns $netns2
ip1 addr add fd00:aa::1/64 dev veth1
ip2 addr add fd00:aa::2/64 dev veth2
ip1 link set veth1 up
ip2 link set veth2 up
waitiface $netns1 veth1
waitiface $netns2 veth2
ip1 -6 route add default dev veth1 via fd00:aa::2
ip2 -6 route add default dev veth2 via fd00:aa::1
n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
n1 ping6 -c 1 fd00::2
pp ip netns delete $netns1
pp ip netns delete $netns2
pp ip netns add $netns1
pp ip netns add $netns2

# Ensure there aren't circular reference loops
ip1 link add wg1 type wireguard
ip2 link add wg2 type wireguard
ip1 link set wg1 netns $netns2
ip2 link set wg2 netns $netns1
pp ip netns delete $netns1
pp ip netns delete $netns2
pp ip netns add $netns1
pp ip netns add $netns2

sleep 2 # Wait for cleanup and grace periods
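# Finally, scan the kernel log for object lifetime messages. When WireGuard's
# debug logging is enabled, each device and peer presumably logs a "... created"
# and a "... destroyed" line (which is what the regex below matches); pairing
# the two up catches anything that was created but never torn down before its
# namespace went away.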
declare -A objects
while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
	[[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
	objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
done < /dev/kmsg
alldeleted=1
for object in "${!objects[@]}"; do
	if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
		echo "Error: $object: merely ${objects["$object"]}" >&3
		alldeleted=0
	fi
done
[[ $alldeleted -eq 1 ]]
pretty "" "Objects that were created were also destroyed."