1*d3bec013SDavid Verbeiren // SPDX-License-Identifier: GPL-2.0-only
2*d3bec013SDavid Verbeiren /* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
3*d3bec013SDavid Verbeiren
4*d3bec013SDavid Verbeiren #include <test_progs.h>
5*d3bec013SDavid Verbeiren #include "test_map_init.skel.h"
6*d3bec013SDavid Verbeiren
#define TEST_VALUE 0x1234	/* value the bpf prog inserts for the current CPU */
#define FILL_VALUE 0xdeadbeef	/* value userspace pre-fills into every CPU slot */

static int nr_cpus;	/* possible CPU count; set once in test_map_init() */
static int duration;	/* implicitly referenced by the CHECK() macro */

/* Key/value types; presumably match the map declared in the BPF object
 * (test_map_init.skel.h) — confirm against the .bpf.c source.
 */
typedef unsigned long long map_key_t;
typedef unsigned long long map_value_t;
/* Per-CPU value wrapper, aligned so bpf_percpu() indexing works correctly. */
typedef struct {
	map_value_t v; /* padding */
} __bpf_percpu_val_align pcpu_map_value_t;
18*d3bec013SDavid Verbeiren
19*d3bec013SDavid Verbeiren
map_populate(int map_fd,int num)20*d3bec013SDavid Verbeiren static int map_populate(int map_fd, int num)
21*d3bec013SDavid Verbeiren {
22*d3bec013SDavid Verbeiren pcpu_map_value_t value[nr_cpus];
23*d3bec013SDavid Verbeiren int i, err;
24*d3bec013SDavid Verbeiren map_key_t key;
25*d3bec013SDavid Verbeiren
26*d3bec013SDavid Verbeiren for (i = 0; i < nr_cpus; i++)
27*d3bec013SDavid Verbeiren bpf_percpu(value, i) = FILL_VALUE;
28*d3bec013SDavid Verbeiren
29*d3bec013SDavid Verbeiren for (key = 1; key <= num; key++) {
30*d3bec013SDavid Verbeiren err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
31*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "bpf_map_update_elem"))
32*d3bec013SDavid Verbeiren return -1;
33*d3bec013SDavid Verbeiren }
34*d3bec013SDavid Verbeiren
35*d3bec013SDavid Verbeiren return 0;
36*d3bec013SDavid Verbeiren }
37*d3bec013SDavid Verbeiren
setup(enum bpf_map_type map_type,int map_sz,int * map_fd,int populate)38*d3bec013SDavid Verbeiren static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
39*d3bec013SDavid Verbeiren int *map_fd, int populate)
40*d3bec013SDavid Verbeiren {
41*d3bec013SDavid Verbeiren struct test_map_init *skel;
42*d3bec013SDavid Verbeiren int err;
43*d3bec013SDavid Verbeiren
44*d3bec013SDavid Verbeiren skel = test_map_init__open();
45*d3bec013SDavid Verbeiren if (!ASSERT_OK_PTR(skel, "skel_open"))
46*d3bec013SDavid Verbeiren return NULL;
47*d3bec013SDavid Verbeiren
48*d3bec013SDavid Verbeiren err = bpf_map__set_type(skel->maps.hashmap1, map_type);
49*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "bpf_map__set_type"))
50*d3bec013SDavid Verbeiren goto error;
51*d3bec013SDavid Verbeiren
52*d3bec013SDavid Verbeiren err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
53*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
54*d3bec013SDavid Verbeiren goto error;
55*d3bec013SDavid Verbeiren
56*d3bec013SDavid Verbeiren err = test_map_init__load(skel);
57*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "skel_load"))
58*d3bec013SDavid Verbeiren goto error;
59*d3bec013SDavid Verbeiren
60*d3bec013SDavid Verbeiren *map_fd = bpf_map__fd(skel->maps.hashmap1);
61*d3bec013SDavid Verbeiren if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
62*d3bec013SDavid Verbeiren goto error;
63*d3bec013SDavid Verbeiren
64*d3bec013SDavid Verbeiren err = map_populate(*map_fd, populate);
65*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "map_populate"))
66*d3bec013SDavid Verbeiren goto error_map;
67*d3bec013SDavid Verbeiren
68*d3bec013SDavid Verbeiren return skel;
69*d3bec013SDavid Verbeiren
70*d3bec013SDavid Verbeiren error_map:
71*d3bec013SDavid Verbeiren close(*map_fd);
72*d3bec013SDavid Verbeiren error:
73*d3bec013SDavid Verbeiren test_map_init__destroy(skel);
74*d3bec013SDavid Verbeiren return NULL;
75*d3bec013SDavid Verbeiren }
76*d3bec013SDavid Verbeiren
77*d3bec013SDavid Verbeiren /* executes bpf program that updates map with key, value */
/* Have the bpf program update the map with (key, value): publish the inputs
 * through the program's .bss, attach, trigger the tracepoint once, detach.
 * Returns 0 on success, -1 if attaching failed.
 */
static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
				map_value_t value)
{
	/* Inputs the bpf program reads; inPid filters out other processes. */
	skel->bss->inKey = key;
	skel->bss->inValue = value;
	skel->bss->inPid = getpid();

	if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
		return -1;

	/* Fire the syscall the program's tracepoint hooks. */
	syscall(__NR_getpgid);

	test_map_init__detach(skel);

	return 0;
}
99*d3bec013SDavid Verbeiren
/* Verify exactly one CPU slot holds 'expected' and all other slots are 0
 * (i.e. the kernel zero-initialized the per-CPU element and only the CPU
 * that ran the bpf prog wrote a value). Returns 0 on success, -1 otherwise.
 */
static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
{
	int cpu, nonzero = 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		map_value_t v = bpf_percpu(value, cpu);

		if (!v)
			continue;
		if (CHECK(v != expected, "map value",
			  "unexpected for cpu %d: 0x%llx\n", cpu, v))
			return -1;
		nonzero++;
	}

	if (CHECK(nonzero != 1, "map value", "set for %d CPUs instead of 1!\n",
		  nonzero))
		return -1;

	return 0;
}
121*d3bec013SDavid Verbeiren
122*d3bec013SDavid Verbeiren /* Add key=1 elem with values set for all CPUs
123*d3bec013SDavid Verbeiren * Delete elem key=1
124*d3bec013SDavid Verbeiren * Run bpf prog that inserts new key=1 elem with value=0x1234
125*d3bec013SDavid Verbeiren * (bpf prog can only set value for current CPU)
126*d3bec013SDavid Verbeiren * Lookup Key=1 and check value is as expected for all CPUs:
127*d3bec013SDavid Verbeiren * value set by bpf prog for one CPU, 0 for all others
128*d3bec013SDavid Verbeiren */
test_pcpu_map_init(void)129*d3bec013SDavid Verbeiren static void test_pcpu_map_init(void)
130*d3bec013SDavid Verbeiren {
131*d3bec013SDavid Verbeiren pcpu_map_value_t value[nr_cpus];
132*d3bec013SDavid Verbeiren struct test_map_init *skel;
133*d3bec013SDavid Verbeiren int map_fd, err;
134*d3bec013SDavid Verbeiren map_key_t key;
135*d3bec013SDavid Verbeiren
136*d3bec013SDavid Verbeiren /* max 1 elem in map so insertion is forced to reuse freed entry */
137*d3bec013SDavid Verbeiren skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
138*d3bec013SDavid Verbeiren if (!ASSERT_OK_PTR(skel, "prog_setup"))
139*d3bec013SDavid Verbeiren return;
140*d3bec013SDavid Verbeiren
141*d3bec013SDavid Verbeiren /* delete element so the entry can be re-used*/
142*d3bec013SDavid Verbeiren key = 1;
143*d3bec013SDavid Verbeiren err = bpf_map_delete_elem(map_fd, &key);
144*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "bpf_map_delete_elem"))
145*d3bec013SDavid Verbeiren goto cleanup;
146*d3bec013SDavid Verbeiren
147*d3bec013SDavid Verbeiren /* run bpf prog that inserts new elem, re-using the slot just freed */
148*d3bec013SDavid Verbeiren err = prog_run_insert_elem(skel, key, TEST_VALUE);
149*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "prog_run_insert_elem"))
150*d3bec013SDavid Verbeiren goto cleanup;
151*d3bec013SDavid Verbeiren
152*d3bec013SDavid Verbeiren /* check that key=1 was re-created by bpf prog */
153*d3bec013SDavid Verbeiren err = bpf_map_lookup_elem(map_fd, &key, value);
154*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
155*d3bec013SDavid Verbeiren goto cleanup;
156*d3bec013SDavid Verbeiren
157*d3bec013SDavid Verbeiren /* and has expected values */
158*d3bec013SDavid Verbeiren check_values_one_cpu(value, TEST_VALUE);
159*d3bec013SDavid Verbeiren
160*d3bec013SDavid Verbeiren cleanup:
161*d3bec013SDavid Verbeiren test_map_init__destroy(skel);
162*d3bec013SDavid Verbeiren }
163*d3bec013SDavid Verbeiren
164*d3bec013SDavid Verbeiren /* Add key=1 and key=2 elems with values set for all CPUs
165*d3bec013SDavid Verbeiren * Run bpf prog that inserts new key=3 elem
166*d3bec013SDavid Verbeiren * (only for current cpu; other cpus should have initial value = 0)
167*d3bec013SDavid Verbeiren * Lookup Key=1 and check value is as expected for all CPUs
168*d3bec013SDavid Verbeiren */
test_pcpu_lru_map_init(void)169*d3bec013SDavid Verbeiren static void test_pcpu_lru_map_init(void)
170*d3bec013SDavid Verbeiren {
171*d3bec013SDavid Verbeiren pcpu_map_value_t value[nr_cpus];
172*d3bec013SDavid Verbeiren struct test_map_init *skel;
173*d3bec013SDavid Verbeiren int map_fd, err;
174*d3bec013SDavid Verbeiren map_key_t key;
175*d3bec013SDavid Verbeiren
176*d3bec013SDavid Verbeiren /* Set up LRU map with 2 elements, values filled for all CPUs.
177*d3bec013SDavid Verbeiren * With these 2 elements, the LRU map is full
178*d3bec013SDavid Verbeiren */
179*d3bec013SDavid Verbeiren skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
180*d3bec013SDavid Verbeiren if (!ASSERT_OK_PTR(skel, "prog_setup"))
181*d3bec013SDavid Verbeiren return;
182*d3bec013SDavid Verbeiren
183*d3bec013SDavid Verbeiren /* run bpf prog that inserts new key=3 element, re-using LRU slot */
184*d3bec013SDavid Verbeiren key = 3;
185*d3bec013SDavid Verbeiren err = prog_run_insert_elem(skel, key, TEST_VALUE);
186*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "prog_run_insert_elem"))
187*d3bec013SDavid Verbeiren goto cleanup;
188*d3bec013SDavid Verbeiren
189*d3bec013SDavid Verbeiren /* check that key=3 replaced one of earlier elements */
190*d3bec013SDavid Verbeiren err = bpf_map_lookup_elem(map_fd, &key, value);
191*d3bec013SDavid Verbeiren if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
192*d3bec013SDavid Verbeiren goto cleanup;
193*d3bec013SDavid Verbeiren
194*d3bec013SDavid Verbeiren /* and has expected values */
195*d3bec013SDavid Verbeiren check_values_one_cpu(value, TEST_VALUE);
196*d3bec013SDavid Verbeiren
197*d3bec013SDavid Verbeiren cleanup:
198*d3bec013SDavid Verbeiren test_map_init__destroy(skel);
199*d3bec013SDavid Verbeiren }
200*d3bec013SDavid Verbeiren
test_map_init(void)201*d3bec013SDavid Verbeiren void test_map_init(void)
202*d3bec013SDavid Verbeiren {
203*d3bec013SDavid Verbeiren nr_cpus = bpf_num_possible_cpus();
204*d3bec013SDavid Verbeiren if (nr_cpus <= 1) {
205*d3bec013SDavid Verbeiren printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
206*d3bec013SDavid Verbeiren test__skip();
207*d3bec013SDavid Verbeiren return;
208*d3bec013SDavid Verbeiren }
209*d3bec013SDavid Verbeiren
210*d3bec013SDavid Verbeiren if (test__start_subtest("pcpu_map_init"))
211*d3bec013SDavid Verbeiren test_pcpu_map_init();
212*d3bec013SDavid Verbeiren if (test__start_subtest("pcpu_lru_map_init"))
213*d3bec013SDavid Verbeiren test_pcpu_lru_map_init();
214*d3bec013SDavid Verbeiren }
215