1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3#
4# Setup/topology:
5#
6#    NS1             NS2             NS3
7#   veth1 <---> veth2   veth3 <---> veth4 (the top route)
8#   veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
9#
10#   each vethN gets IPv[4|6]_N address
11#
12#   IPv*_SRC = IPv*_1
13#   IPv*_DST = IPv*_4
14#
15#   all tests test pings from IPv*_SRC to IPv*_DST
16#
17#   by default, routes are configured to allow packets to go
18#   IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
19#
20#   a GRE device is installed in NS3 with IPv*_GRE, and
21#   NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
22#   (the bottom route)
23#
24# Tests:
25#
26#   1. routes NS2->IPv*_DST are brought down, so the only way a ping
27#      from IP*_SRC to IP*_DST can work is via IPv*_GRE
28#
29#   2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
30#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
31#
32#       ping: SRC->[encap at veth1:egress]->GRE:decap->DST
33#       ping replies go DST->SRC directly
34#
35#   2b. in an ingress test, a bpf LWT_IN program is installed on veth2
36#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
37#
38#       ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
39#       ping replies go DST->SRC directly
40
# All of the ip/netns/sysctl commands below require root privileges;
# bail out immediately with a FAIL marker otherwise.
if (( EUID != 0 )); then
	echo "This script must be run as root"
	echo "FAIL"
	exit 1
fi
46
47readonly NS1="ns1-$(mktemp -u XXXXXX)"
48readonly NS2="ns2-$(mktemp -u XXXXXX)"
49readonly NS3="ns3-$(mktemp -u XXXXXX)"
50
51readonly IPv4_1="172.16.1.100"
52readonly IPv4_2="172.16.2.100"
53readonly IPv4_3="172.16.3.100"
54readonly IPv4_4="172.16.4.100"
55readonly IPv4_5="172.16.5.100"
56readonly IPv4_6="172.16.6.100"
57readonly IPv4_7="172.16.7.100"
58readonly IPv4_8="172.16.8.100"
59readonly IPv4_GRE="172.16.16.100"
60
61readonly IPv4_SRC=$IPv4_1
62readonly IPv4_DST=$IPv4_4
63
64readonly IPv6_1="fb01::1"
65readonly IPv6_2="fb02::1"
66readonly IPv6_3="fb03::1"
67readonly IPv6_4="fb04::1"
68readonly IPv6_5="fb05::1"
69readonly IPv6_6="fb06::1"
70readonly IPv6_7="fb07::1"
71readonly IPv6_8="fb08::1"
72readonly IPv6_GRE="fb10::1"
73
74readonly IPv6_SRC=$IPv6_1
75readonly IPv6_DST=$IPv6_4
76
77TEST_STATUS=0
78TESTS_SUCCEEDED=0
79TESTS_FAILED=0
80
process_test_results()
{
	# Turn the accumulated TEST_STATUS of the just-finished test into a
	# PASS/FAIL line and bump the matching global counter.
	if (( TEST_STATUS == 0 )); then
		echo "PASS"
		(( ++TESTS_SUCCEEDED ))
	else
		echo "FAIL"
		(( ++TESTS_FAILED ))
	fi
}
91
print_test_summary_and_exit()
{
	# Report totals and exit with 0 iff no test failed.
	echo "passed tests: ${TESTS_SUCCEEDED}"
	echo "failed tests: ${TESTS_FAILED}"
	[ "${TESTS_FAILED}" -eq "0" ] && exit 0
	exit 1
}
102
setup()
{
	# Build the three-namespace topology described in the header comment:
	# two parallel veth paths NS1<->NS2<->NS3 plus GRE endpoints in NS3.
	# Runs under "set -e" so any failed configuration step aborts the
	# script rather than producing misleading test results.
	set -e  # exit on error
	TEST_STATUS=0

	# create devices and namespaces
	ip netns add "${NS1}"
	ip netns add "${NS2}"
	ip netns add "${NS3}"

	ip link add veth1 type veth peer name veth2
	ip link add veth3 type veth peer name veth4
	ip link add veth5 type veth peer name veth6
	ip link add veth7 type veth peer name veth8

	# NS2 is the middle hop on both routes, so it must forward v4 and v6
	ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
	ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1

	ip link set veth1 netns ${NS1}
	ip link set veth2 netns ${NS2}
	ip link set veth3 netns ${NS2}
	ip link set veth4 netns ${NS3}
	ip link set veth5 netns ${NS1}
	ip link set veth6 netns ${NS2}
	ip link set veth7 netns ${NS2}
	ip link set veth8 netns ${NS3}

	# configure addresses: the top route (1-2-3-4)
	ip -netns ${NS1}    addr add ${IPv4_1}/24  dev veth1
	ip -netns ${NS2}    addr add ${IPv4_2}/24  dev veth2
	ip -netns ${NS2}    addr add ${IPv4_3}/24  dev veth3
	ip -netns ${NS3}    addr add ${IPv4_4}/24  dev veth4
	ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
	ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
	ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
	ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4

	# configure addresses: the bottom route (5-6-7-8)
	ip -netns ${NS1}    addr add ${IPv4_5}/24  dev veth5
	ip -netns ${NS2}    addr add ${IPv4_6}/24  dev veth6
	ip -netns ${NS2}    addr add ${IPv4_7}/24  dev veth7
	ip -netns ${NS3}    addr add ${IPv4_8}/24  dev veth8
	ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
	ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
	ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
	ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8


	ip -netns ${NS1} link set dev veth1 up
	ip -netns ${NS2} link set dev veth2 up
	ip -netns ${NS2} link set dev veth3 up
	ip -netns ${NS3} link set dev veth4 up
	ip -netns ${NS1} link set dev veth5 up
	ip -netns ${NS2} link set dev veth6 up
	ip -netns ${NS2} link set dev veth7 up
	ip -netns ${NS3} link set dev veth8 up

	# configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
	# the bottom route to specific bottom addresses

	# NS1
	# top route
	ip -netns ${NS1}    route add ${IPv4_2}/32  dev veth1
	ip -netns ${NS1}    route add default dev veth1 via ${IPv4_2}  # go top by default
	ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1
	ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2}  # go top by default
	# bottom route
	ip -netns ${NS1}    route add ${IPv4_6}/32  dev veth5
	ip -netns ${NS1}    route add ${IPv4_7}/32  dev veth5 via ${IPv4_6}
	ip -netns ${NS1}    route add ${IPv4_8}/32  dev veth5 via ${IPv4_6}
	ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5
	ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6}
	ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6}

	# NS2
	# top route
	ip -netns ${NS2}    route add ${IPv4_1}/32  dev veth2
	ip -netns ${NS2}    route add ${IPv4_4}/32  dev veth3
	ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2
	ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3
	# bottom route
	ip -netns ${NS2}    route add ${IPv4_5}/32  dev veth6
	ip -netns ${NS2}    route add ${IPv4_8}/32  dev veth7
	ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6
	ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7

	# NS3
	# top route
	ip -netns ${NS3}    route add ${IPv4_3}/32  dev veth4
	ip -netns ${NS3}    route add ${IPv4_1}/32  dev veth4 via ${IPv4_3}
	ip -netns ${NS3}    route add ${IPv4_2}/32  dev veth4 via ${IPv4_3}
	ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
	ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
	ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
	# bottom route
	ip -netns ${NS3}    route add ${IPv4_7}/32  dev veth8
	ip -netns ${NS3}    route add ${IPv4_5}/32  dev veth8 via ${IPv4_7}
	ip -netns ${NS3}    route add ${IPv4_6}/32  dev veth8 via ${IPv4_7}
	ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
	ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
	ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}

	# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
	ip -netns ${NS3} link set gre_dev up
	# NOTE(review): "nodad" is an IPv6 address flag; presumably ignored
	# for this IPv4 address — confirm against the iproute2 version in use.
	ip -netns ${NS3} addr add ${IPv4_GRE} nodad dev gre_dev
	ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
	ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}


	# configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
	ip -netns ${NS3} link set gre6_dev up
	ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
	ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6}
	ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8}

	# rp_filter gets confused by what these tests are doing, so disable it
	ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0

	sleep 1  # reduce flakiness
	set +e
}
228
cleanup()
{
	# Tear down all three namespaces (this also removes the veth pairs
	# and tunnel devices living inside them).  Errors are silenced so
	# cleanup is safe to run even if setup never completed.
	local ns
	for ns in "${NS1}" "${NS2}" "${NS3}"; do
		ip netns del "${ns}" 2> /dev/null
	done
}

trap cleanup EXIT
237
remove_routes_to_gredev()
{
	# Negative-test helper: drop the host routes in NS1 and NS2 that
	# point at the GRE endpoint addresses, so encapped packets have no
	# path to the tunnel devices anymore.
	ip -netns "${NS1}"    route del "${IPv4_GRE}" dev veth5
	ip -netns "${NS1}" -6 route del "${IPv6_GRE}/128" dev veth5
	ip -netns "${NS2}"    route del "${IPv4_GRE}" dev veth7
	ip -netns "${NS2}" -6 route del "${IPv6_GRE}/128" dev veth7
}
245
add_unreachable_routes_to_gredev()
{
	# Negative-test helper: install explicit "unreachable" routes for
	# the GRE endpoints in NS1 and NS2 so encapped packets are actively
	# rejected rather than merely unrouted.
	local ns
	for ns in "${NS1}" "${NS2}"; do
		ip -netns "${ns}"    route add unreachable "${IPv4_GRE}/32"
		ip -netns "${ns}" -6 route add unreachable "${IPv6_GRE}/128"
	done
}
253
test_ping()
{
	# Ping IPv*_DST from IPv*_SRC inside NS1 and compare the outcome
	# against the expectation, recording a mismatch in TEST_STATUS.
	#
	# $1 - PROTO:    "IPv4" or "IPv6"
	# $2 - EXPECTED: 0 if the ping should succeed, 1 if it should fail
	#
	# Note: the original "local readonly VAR=..." declared a stray local
	# variable literally named "readonly"; "local -r" is the correct form.
	local -r PROTO=$1
	local -r EXPECTED=$2
	local RET=0

	if [ "${PROTO}" == "IPv4" ] ; then
		# Silence stdout AND stderr.  The previous "2>&1 > /dev/null"
		# order leaked ping's stderr to the terminal.
		ip netns exec ${NS1} ping  -c 1 -W 1 -I ${IPv4_SRC} ${IPv4_DST} > /dev/null 2>&1
		RET=$?
	elif [ "${PROTO}" == "IPv6" ] ; then
		# IPv6 gets a longer -W timeout to reduce flakiness.
		ip netns exec ${NS1} ping6 -c 1 -W 6 -I ${IPv6_SRC} ${IPv6_DST} > /dev/null 2>&1
		RET=$?
	else
		echo "    test_ping: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi

	# Normalize any non-zero ping status to 1 so it is comparable with
	# the EXPECTED argument.
	if [ "0" != "${RET}" ]; then
		RET=1
	fi

	if [ "${EXPECTED}" != "${RET}" ] ; then
		echo "    test_ping failed: expected: ${EXPECTED}; got ${RET}"
		TEST_STATUS=1
	fi
}
280
test_egress()
{
	# Egress test: attach a BPF LWT_XMIT encap program to replacement
	# routes in NS1 (the sender), so packets are GRE-encapsulated on
	# transmit and reach IPv*_DST via the GRE device in NS3.
	#
	# $1 - ENCAP: "IPv4" or "IPv6", selects the outer (encap) header.
	#
	# Fix: "local readonly ENCAP=$1" declared a stray local variable
	# named "readonly" and did not protect ENCAP; use "local -r".
	local -r ENCAP=$1
	echo "starting egress ${ENCAP} encap test"
	setup

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, ping fails
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre dev veth1
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj test_lwt_ip_encap.o sec encap_gre6 dev veth1
	else
		echo "    unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}
324
test_ingress()
{
	# Ingress test: attach a BPF LWT_IN encap program to replacement
	# routes in NS2 (the first forwarding hop), so packets are
	# GRE-encapsulated on receive at veth2.
	#
	# $1 - ENCAP: "IPv4" or "IPv6", selects the outer (encap) header.
	#
	# Fix: "local readonly ENCAP=$1" declared a stray local variable
	# named "readonly" and did not protect ENCAP; use "local -r".
	local -r ENCAP=$1
	echo "starting ingress ${ENCAP} encap test"
	setup

	# the "sleep 1" in setup() lets IPv6 settle; without it ping6
	# sometimes fails with "unable to bind to address"

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, pings fail
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre dev veth2
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
	else
		echo "FAIL: unknown encap ${ENCAP}"
		# Fix: record the failure so process_test_results reports it;
		# previously only the message was printed and the run could
		# still be counted as PASS (test_egress already sets this).
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}
370
371test_egress IPv4
372test_egress IPv6
373test_ingress IPv4
374test_ingress IPv6
375
376print_test_summary_and_exit
377