#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Setup/topology:
#
#    NS1             NS2             NS3
#   veth1 <---> veth2   veth3 <---> veth4 (the top route)
#   veth5 <---> veth6   veth7 <---> veth8 (the bottom route)
#
#   each vethN gets IPv[4|6]_N address
#
#   IPv*_SRC = IPv*_1
#   IPv*_DST = IPv*_4
#
#   all tests test pings from IPv*_SRC to IPv*_DST
#
#   by default, routes are configured to allow packets to go
#   IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
#
#   a GRE device is installed in NS3 with IPv*_GRE, and
#   NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
#   (the bottom route)
#
# Tests:
#
#   1. routes NS2->IPv*_DST are brought down, so the only way a ping
#      from IP*_SRC to IP*_DST can work is via IPv*_GRE
#
#   2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#       ping: SRC->[encap at veth1:egress]->GRE:decap->DST
#       ping replies go DST->SRC directly
#
#   2b. in an ingress test, a bpf LWT_IN program is installed on veth2
#       that encaps the packets with an IP/GRE header to route to IPv*_GRE
#
#       ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
#       ping replies go DST->SRC directly

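# Usage: the script must be run as root, and the compiled BPF object
# test_lwt_ip_encap.o (loaded by the "ip route ... encap bpf" commands
# below via a relative path) must be available in the current directory.
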
if [[ $EUID -ne 0 ]]; then
	echo "This script must be run as root"
	echo "FAIL"
	exit 1
fi

readonly NS1="ns1-$(mktemp -u XXXXXX)"
readonly NS2="ns2-$(mktemp -u XXXXXX)"
readonly NS3="ns3-$(mktemp -u XXXXXX)"

readonly IPv4_1="172.16.1.100"
readonly IPv4_2="172.16.2.100"
readonly IPv4_3="172.16.3.100"
readonly IPv4_4="172.16.4.100"
readonly IPv4_5="172.16.5.100"
readonly IPv4_6="172.16.6.100"
readonly IPv4_7="172.16.7.100"
readonly IPv4_8="172.16.8.100"
readonly IPv4_GRE="172.16.16.100"

readonly IPv4_SRC=$IPv4_1
readonly IPv4_DST=$IPv4_4

readonly IPv6_1="fb01::1"
readonly IPv6_2="fb02::1"
readonly IPv6_3="fb03::1"
readonly IPv6_4="fb04::1"
readonly IPv6_5="fb05::1"
readonly IPv6_6="fb06::1"
readonly IPv6_7="fb07::1"
readonly IPv6_8="fb08::1"
readonly IPv6_GRE="fb10::1"

readonly IPv6_SRC=$IPv6_1
readonly IPv6_DST=$IPv6_4

TEST_STATUS=0
TESTS_SUCCEEDED=0
TESTS_FAILED=0

TMPFILE=""

process_test_results()
{
	if [[ "${TEST_STATUS}" -eq 0 ]] ; then
		echo "PASS"
		TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
	else
		echo "FAIL"
		TESTS_FAILED=$((TESTS_FAILED+1))
	fi
}

print_test_summary_and_exit()
{
	echo "passed tests: ${TESTS_SUCCEEDED}"
	echo "failed tests: ${TESTS_FAILED}"
	if [ "${TESTS_FAILED}" -eq "0" ] ; then
		exit 0
	else
		exit 1
	fi
}

setup()
{
	set -e  # exit on error
	TEST_STATUS=0

	# create devices and namespaces
	ip netns add "${NS1}"
	ip netns add "${NS2}"
	ip netns add "${NS3}"

	# rp_filter gets confused by what these tests are doing, so disable it
	ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
	ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
	ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
	ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0

	ip link add veth1 type veth peer name veth2
	ip link add veth3 type veth peer name veth4
	ip link add veth5 type veth peer name veth6
	ip link add veth7 type veth peer name veth8

	ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
	ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1

	ip link set veth1 netns ${NS1}
	ip link set veth2 netns ${NS2}
	ip link set veth3 netns ${NS2}
	ip link set veth4 netns ${NS3}
	ip link set veth5 netns ${NS1}
	ip link set veth6 netns ${NS2}
	ip link set veth7 netns ${NS2}
	ip link set veth8 netns ${NS3}

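	# in VRF mode, enslave the NS1/NS2 test devices to a VRF device
	# named "red" (routing table 1001) in each namespace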
	if [ ! -z "${VRF}" ] ; then
		ip -netns ${NS1} link add red type vrf table 1001
		ip -netns ${NS1} link set red up
		ip -netns ${NS1} route add table 1001 unreachable default metric 8192
		ip -netns ${NS1} -6 route add table 1001 unreachable default metric 8192
		ip -netns ${NS1} link set veth1 vrf red
		ip -netns ${NS1} link set veth5 vrf red

		ip -netns ${NS2} link add red type vrf table 1001
		ip -netns ${NS2} link set red up
		ip -netns ${NS2} route add table 1001 unreachable default metric 8192
		ip -netns ${NS2} -6 route add table 1001 unreachable default metric 8192
		ip -netns ${NS2} link set veth2 vrf red
		ip -netns ${NS2} link set veth3 vrf red
		ip -netns ${NS2} link set veth6 vrf red
		ip -netns ${NS2} link set veth7 vrf red
	fi

	# configure addresses: the top route (1-2-3-4)
	ip -netns ${NS1}    addr add ${IPv4_1}/24  dev veth1
	ip -netns ${NS2}    addr add ${IPv4_2}/24  dev veth2
	ip -netns ${NS2}    addr add ${IPv4_3}/24  dev veth3
	ip -netns ${NS3}    addr add ${IPv4_4}/24  dev veth4
	ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
	ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
	ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
	ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4

	# configure addresses: the bottom route (5-6-7-8)
	ip -netns ${NS1}    addr add ${IPv4_5}/24  dev veth5
	ip -netns ${NS2}    addr add ${IPv4_6}/24  dev veth6
	ip -netns ${NS2}    addr add ${IPv4_7}/24  dev veth7
	ip -netns ${NS3}    addr add ${IPv4_8}/24  dev veth8
	ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
	ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
	ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
	ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8

	ip -netns ${NS1} link set dev veth1 up
	ip -netns ${NS2} link set dev veth2 up
	ip -netns ${NS2} link set dev veth3 up
	ip -netns ${NS3} link set dev veth4 up
	ip -netns ${NS1} link set dev veth5 up
	ip -netns ${NS2} link set dev veth6 up
	ip -netns ${NS2} link set dev veth7 up
	ip -netns ${NS3} link set dev veth8 up

	# configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
	# the bottom route to specific bottom addresses

	# NS1
	# top route
	ip -netns ${NS1}    route add ${IPv4_2}/32  dev veth1 ${VRF}
	ip -netns ${NS1}    route add default dev veth1 via ${IPv4_2} ${VRF}  # go top by default
	ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1 ${VRF}
	ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} ${VRF}  # go top by default
	# bottom route
	ip -netns ${NS1}    route add ${IPv4_6}/32  dev veth5 ${VRF}
	ip -netns ${NS1}    route add ${IPv4_7}/32  dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS1}    route add ${IPv4_8}/32  dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5 ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6} ${VRF}
	ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6} ${VRF}

	# NS2
	# top route
	ip -netns ${NS2}    route add ${IPv4_1}/32  dev veth2 ${VRF}
	ip -netns ${NS2}    route add ${IPv4_4}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3 ${VRF}
	# bottom route
	ip -netns ${NS2}    route add ${IPv4_5}/32  dev veth6 ${VRF}
	ip -netns ${NS2}    route add ${IPv4_8}/32  dev veth7 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6 ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7 ${VRF}

	# NS3
	# top route
	ip -netns ${NS3}    route add ${IPv4_3}/32  dev veth4
	ip -netns ${NS3}    route add ${IPv4_1}/32  dev veth4 via ${IPv4_3}
	ip -netns ${NS3}    route add ${IPv4_2}/32  dev veth4 via ${IPv4_3}
	ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
	ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
	ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
	# bottom route
	ip -netns ${NS3}    route add ${IPv4_7}/32  dev veth8
	ip -netns ${NS3}    route add ${IPv4_5}/32  dev veth8 via ${IPv4_7}
	ip -netns ${NS3}    route add ${IPv4_6}/32  dev veth8 via ${IPv4_7}
	ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
	ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
	ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}

	# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
	ip -netns ${NS3} link set gre_dev up
	ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
	ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6} ${VRF}
	ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8} ${VRF}

	# configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
	ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
	ip -netns ${NS3} link set gre6_dev up
	ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
	ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
	ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}

	TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)

	sleep 1  # reduce flakiness
	set +e
}
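
# For manual debugging it can be handy to inspect the per-namespace state
# while a test is running, for example (illustrative only, not executed here):
#   ip netns exec "${NS1}" ip route show
#   ip netns exec "${NS3}" ip -d link show gre_dev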

cleanup()
{
	if [ -f ${TMPFILE} ] ; then
		rm ${TMPFILE}
	fi

	ip netns del ${NS1} 2> /dev/null
	ip netns del ${NS2} 2> /dev/null
	ip netns del ${NS3} 2> /dev/null
}

trap cleanup EXIT

remove_routes_to_gredev()
{
	ip -netns ${NS1} route del ${IPv4_GRE} dev veth5 ${VRF}
	ip -netns ${NS2} route del ${IPv4_GRE} dev veth7 ${VRF}
	ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7 ${VRF}
}

add_unreachable_routes_to_gredev()
{
	ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32 ${VRF}
	ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
	ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
}

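# test_ping <IPv4|IPv6> <expected result>
#   expected result: 0 if the ping is expected to succeed, 1 if it is
#   expected to fail; a mismatch marks the current test as failed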
test_ping()
{
	local readonly PROTO=$1
	local readonly EXPECTED=$2
	local RET=0

	if [ "${PROTO}" == "IPv4" ] ; then
		ip netns exec ${NS1} ping  -c 1 -W 1 -I veth1 ${IPv4_DST} > /dev/null 2>&1
		RET=$?
	elif [ "${PROTO}" == "IPv6" ] ; then
		ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} > /dev/null 2>&1
		RET=$?
	else
		echo "    test_ping: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi

	if [ "0" != "${RET}" ]; then
		RET=1
	fi

	if [ "${EXPECTED}" != "${RET}" ] ; then
		echo "    test_ping failed: expected: ${EXPECTED}; got ${RET}"
		TEST_STATUS=1
	fi
}

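# test_gso <IPv4|IPv6>
#   send a payload larger than the MTU through the encap route with nc and
#   verify that all bytes arrive at the listener in NS3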
test_gso()
{
	local readonly PROTO=$1
	local readonly PKT_SZ=5000
	local IP_DST=""
	: > ${TMPFILE}  # trim the capture file

	# check that nc is present
	command -v nc >/dev/null 2>&1 || \
		{ echo >&2 "nc is not available: skipping TSO tests"; return; }

	# listen on port 9000, capture TCP into $TMPFILE
	if [ "${PROTO}" == "IPv4" ] ; then
		IP_DST=${IPv4_DST}
		ip netns exec ${NS3} bash -c \
			"nc -4 -l -p 9000 > ${TMPFILE} &"
	elif [ "${PROTO}" == "IPv6" ] ; then
		IP_DST=${IPv6_DST}
		ip netns exec ${NS3} bash -c \
			"nc -6 -l -p 9000 > ${TMPFILE} &"
		RET=$?
	else
		echo "    test_gso: unknown PROTO: ${PROTO}"
		TEST_STATUS=1
	fi
	sleep 1  # let nc start listening

	# send a packet larger than MTU
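	# (bash's /dev/tcp/<host>/<port> pseudo-path opens a TCP connection,
	# so dd's output is streamed to the nc listener started above)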
	ip netns exec ${NS1} bash -c \
		"dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
	sleep 2 # let the packet get delivered

	# verify we received all expected bytes
	SZ=$(stat -c %s ${TMPFILE})
	if [ "$SZ" != "$PKT_SZ" ] ; then
		echo "    test_gso failed: ${PROTO}"
		TEST_STATUS=1
	fi
}

test_egress()
{
	local readonly ENCAP=$1
	echo "starting egress ${ENCAP} encap test ${VRF}"
	setup

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, ping fails
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
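	# the encap_gre/encap_gre6 sections of test_lwt_ip_encap.o are BPF LWT
	# programs that encap the packets with an outer IPv4/IPv6 + GRE header
	# addressed to IPv*_GRE, so they take the bottom route to NS3 for decap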
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
		ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
	else
		echo "    unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# skip GSO tests with VRF: VRF routing needs properly assigned
	# source IP/device, which is easy to do with ping and hard with dd/nc.
	if [ -z "${VRF}" ] ; then
		test_gso IPv4
		test_gso IPv6
	fi

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}

test_ingress()
{
	local readonly ENCAP=$1
	echo "starting ingress ${ENCAP} encap test ${VRF}"
	setup

	# note: setup() already sleeps briefly so that IPv6 addresses settle,
	# otherwise ping6 sometimes fails with "unable to bind to address"

	# by default, pings work
	test_ping IPv4 0
	test_ping IPv6 0

	# remove NS2->DST routes, pings fail
	ip -netns ${NS2}    route del ${IPv4_DST}/32  dev veth3 ${VRF}
	ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
	test_ping IPv4 1
	test_ping IPv6 1

	# install replacement routes (LWT/eBPF), pings succeed
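	# same BPF programs as in the egress test, but attached as LWT_IN so
	# the encap happens on veth2 ingress in NS2 rather than on veth1 egress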
	if [ "${ENCAP}" == "IPv4" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
	elif [ "${ENCAP}" == "IPv6" ] ; then
		ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
		ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
			test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
	else
		echo "FAIL: unknown encap ${ENCAP}"
		TEST_STATUS=1
	fi
	test_ping IPv4 0
	test_ping IPv6 0

	# a negative test: remove routes to GRE devices: ping fails
	remove_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	# another negative test
	add_unreachable_routes_to_gredev
	test_ping IPv4 1
	test_ping IPv6 1

	cleanup
	process_test_results
}

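# run the full matrix twice: once with plain routing, then again with the
# test devices enslaved to VRF "red"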
VRF=""
test_egress IPv4
test_egress IPv6
test_ingress IPv4
test_ingress IPv6

VRF="vrf red"
test_egress IPv4
test_egress IPv6
test_ingress IPv4
test_ingress IPv6

print_test_summary_and_exit