1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <sys/mman.h>
4 #include "test_mmap.skel.h"
5 
/* Value layout of the mmap()-able BPF array map: 2048 u64 slots,
 * i.e. 16 KiB total. The "advanced mmap" checks below treat the map
 * as exactly 4 pages, which assumes 4 KiB pages — TODO confirm on
 * non-4K-page architectures. Must match the BPF-side definition in
 * progs/test_mmap.c byte-for-byte.
 */
struct map_data {
	__u64 val[512 * 4];
};
9 
/* Round @sz up to the next multiple of the system page size
 * (mmap() lengths on BPF maps must be page-granular).
 */
static size_t roundup_page(size_t sz)
{
	const long page_size = sysconf(_SC_PAGE_SIZE);
	size_t nr_pages = (sz + page_size - 1) / page_size;

	return nr_pages * page_size;
}
15 
/* End-to-end test of mmap()-able BPF maps:
 *  - maps .bss and a global array map R/W and checks that writes through
 *    the mapping, bpf_map_update_elem(), and the attached BPF program
 *    are all coherently visible;
 *  - verifies bpf_map_freeze() interaction: freeze is rejected (EBUSY)
 *    while a writable mapping exists, succeeds once only R/O mappings
 *    remain, and afterwards new R/W mappings are rejected;
 *  - exercises partial munmap()/MAP_FIXED re-mapping of sub-ranges;
 *  - checks that a live mapping keeps the map alive after the skeleton
 *    and all FDs are gone, and that the final munmap() releases it.
 */
void test_mmap(void)
{
	const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
	const size_t map_sz = roundup_page(sizeof(struct map_data));
	const int zero = 0, one = 1, two = 2, far = 1500;
	const long page_size = sysconf(_SC_PAGE_SIZE);
	int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd;
	struct bpf_map *data_map, *bss_map;
	void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
	struct test_mmap__bss *bss_data;
	struct bpf_map_info map_info;
	__u32 map_info_sz = sizeof(map_info);
	struct map_data *map_data;
	struct test_mmap *skel;
	__u64 val = 0;

	skel = test_mmap__open_and_load();
	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
		return;

	bss_map = skel->maps.bss;
	data_map = skel->maps.data_map;
	data_map_fd = bpf_map__fd(data_map);

	/* get map's ID, so we can look it up again after all FDs are closed */
	memset(&map_info, 0, map_info_sz);
	err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
	if (CHECK(err, "map_get_info", "failed %d\n", errno))
		goto cleanup;
	data_map_id = map_info.id;

	/* mmap BSS map */
	bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  bpf_map__fd(bss_map), 0);
	if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
		  ".bss mmap failed: %d\n", errno)) {
		/* NULL it out so the cleanup path doesn't munmap MAP_FAILED */
		bss_mmaped = NULL;
		goto cleanup;
	}
	/* map as R/W first */
	map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			  data_map_fd, 0);
	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
		  "data_map mmap failed: %d\n", errno)) {
		map_mmaped = NULL;
		goto cleanup;
	}

	bss_data = bss_mmaped;
	map_data = map_mmaped;

	/* before attach, everything must be zero-initialized */
	CHECK_FAIL(bss_data->in_val);
	CHECK_FAIL(bss_data->out_val);
	CHECK_FAIL(skel->bss->in_val);
	CHECK_FAIL(skel->bss->out_val);
	CHECK_FAIL(map_data->val[0]);
	CHECK_FAIL(map_data->val[1]);
	CHECK_FAIL(map_data->val[2]);
	CHECK_FAIL(map_data->val[far]);

	err = test_mmap__attach(skel);
	if (CHECK(err, "attach_raw_tp", "err %d\n", err))
		goto cleanup;

	/* write in_val through the mapping, val[0] through the syscall API */
	bss_data->in_val = 123;
	val = 111;
	CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));

	/* give the attached raw_tp program a chance to fire */
	usleep(1);

	/* out_val, val[1], val[2] and val[far] are presumably written by the
	 * BPF program based on in_val — TODO confirm against progs/test_mmap.c
	 */
	CHECK_FAIL(bss_data->in_val != 123);
	CHECK_FAIL(bss_data->out_val != 123);
	CHECK_FAIL(skel->bss->in_val != 123);
	CHECK_FAIL(skel->bss->out_val != 123);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 123);
	CHECK_FAIL(map_data->val[far] != 3 * 123);

	/* the same values must be visible through the lookup syscall */
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
	CHECK_FAIL(val != 111);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
	CHECK_FAIL(val != 222);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
	CHECK_FAIL(val != 123);
	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
	CHECK_FAIL(val != 3 * 123);

	/* data_map freeze should fail due to R/W mmap() */
	err = bpf_map_freeze(data_map_fd);
	if (CHECK(!err || errno != EBUSY, "no_freeze",
		  "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
		goto cleanup;

	/* downgrading protection of the existing mapping is allowed... */
	err = mprotect(map_mmaped, map_sz, PROT_READ);
	if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
		goto cleanup;

	/* unmap R/W mapping */
	err = munmap(map_mmaped, map_sz);
	map_mmaped = NULL;
	if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
		goto cleanup;

	/* re-map as R/O now */
	map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
		  "data_map R/O mmap failed: %d\n", errno)) {
		map_mmaped = NULL;
		goto cleanup;
	}
	/* ...but upgrading an R/O mapping to writable or executable must fail */
	err = mprotect(map_mmaped, map_sz, PROT_WRITE);
	if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
		goto cleanup;
	err = mprotect(map_mmaped, map_sz, PROT_EXEC);
	if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
		goto cleanup;
	map_data = map_mmaped;

	/* map/unmap in a loop to test ref counting */
	for (i = 0; i < 10; i++) {
		int flags = i % 2 ? PROT_READ : PROT_WRITE;
		void *p;

		p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
		if (CHECK_FAIL(p == MAP_FAILED))
			goto cleanup;
		err = munmap(p, map_sz);
		if (CHECK_FAIL(err))
			goto cleanup;
	}

	/* data_map freeze should now succeed due to no R/W mapping */
	err = bpf_map_freeze(data_map_fd);
	if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
		  err, errno))
		goto cleanup;

	/* mapping as R/W now should fail */
	tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    data_map_fd, 0);
	if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}

	/* frozen map is still writable from the BPF program side */
	bss_data->in_val = 321;
	usleep(1);
	CHECK_FAIL(bss_data->in_val != 321);
	CHECK_FAIL(bss_data->out_val != 321);
	CHECK_FAIL(skel->bss->in_val != 321);
	CHECK_FAIL(skel->bss->out_val != 321);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 321);
	CHECK_FAIL(map_data->val[far] != 3 * 321);

	/* check some more advanced mmap() manipulations */

	/* map all but last page: pages 1-3 mapped */
	tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
			  data_map_fd, 0);
	if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
		goto cleanup;

	/* unmap second page: pages 1, 3 mapped */
	err = munmap(tmp1 + page_size, page_size);
	if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}

	/* map page 2 back into the hole with MAP_FIXED */
	tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
		    MAP_SHARED | MAP_FIXED, data_map_fd, 0);
	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
		/* only pages 1 and 3 are still mapped at this point */
		munmap(tmp1, page_size);
		munmap(tmp1 + 2*page_size, page_size);
		goto cleanup;
	}
	CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
	      "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

	/* re-map all 4 pages over the same region */
	tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
		    data_map_fd, 0);
	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
		munmap(tmp1, 3 * page_size); /* unmap page 1 */
		goto cleanup;
	}
	CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

	/* data must still be intact through the fresh mapping */
	map_data = tmp2;
	CHECK_FAIL(bss_data->in_val != 321);
	CHECK_FAIL(bss_data->out_val != 321);
	CHECK_FAIL(skel->bss->in_val != 321);
	CHECK_FAIL(skel->bss->out_val != 321);
	CHECK_FAIL(map_data->val[0] != 111);
	CHECK_FAIL(map_data->val[1] != 222);
	CHECK_FAIL(map_data->val[2] != 321);
	CHECK_FAIL(map_data->val[far] != 3 * 321);

	munmap(tmp2, 4 * page_size);

	/* keep one last mapping alive to test map refcounting below */
	tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
	if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
		goto cleanup;

	/* drop the skeleton (closes all map FDs) and the other mappings */
	test_mmap__destroy(skel);
	skel = NULL;
	CHECK_FAIL(munmap(bss_mmaped, bss_sz));
	bss_mmaped = NULL;
	CHECK_FAIL(munmap(map_mmaped, map_sz));
	map_mmaped = NULL;

	/* map should be still held by active mmap */
	tmp_fd = bpf_map_get_fd_by_id(data_map_id);
	if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
		munmap(tmp1, map_sz);
		goto cleanup;
	}
	close(tmp_fd);

	/* this should release data map finally */
	munmap(tmp1, map_sz);

	/* we need to wait for RCU grace period: poll until no existing map ID
	 * is <= data_map_id anymore (map IDs are allocated in increasing
	 * order), i.e. our map has actually been freed
	 */
	for (i = 0; i < 10000; i++) {
		__u32 id = data_map_id - 1;
		if (bpf_map_get_next_id(id, &id) || id > data_map_id)
			break;
		usleep(1);
	}

	/* should fail to get map FD by non-existing ID */
	tmp_fd = bpf_map_get_fd_by_id(data_map_id);
	if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
		  "unexpectedly succeeded %d\n", tmp_fd)) {
		close(tmp_fd);
		goto cleanup;
	}

cleanup:
	/* munmap only what is still mapped; destroy tolerates NULL skel */
	if (bss_mmaped)
		CHECK_FAIL(munmap(bss_mmaped, bss_sz));
	if (map_mmaped)
		CHECK_FAIL(munmap(map_mmaped, map_sz));
	test_mmap__destroy(skel);
}
265