// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

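/* Generic view of every map_value_##_size defined below: only the leading
 * kptr field is touched, so batch_alloc_free() can work on any of the
 * per-size array maps.
 */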
struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

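/* Object sizes under test. data_btf_ids[] is expected to hold the local BTF
 * type ID of the matching struct bin_data_##size, filled in by the userspace
 * part of the test before the program is loaded, so bpf_obj_new_impl() knows
 * which type to allocate.
 */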
const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

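/* err reports the first failure back to userspace; pid limits the fentry
 * program below to the test process.
 */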
int err = 0;
int pid = 0;

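/* For a given _size, define struct bin_data_##_size (its payload is _size
 * minus a pointer, leaving room for the allocator's per-object header so the
 * allocation lands in the _size-byte cache), a map value that carries a kptr
 * to it, and a 128-entry array map of such values.
 */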
#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
		/* To emit BTF info for bin_data_xx */ \
		struct bin_data_##_size not_used; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps");

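/* Allocate 'batch' objects of the type identified by data_btf_ids[idx] and
 * stash them in the first 'batch' slots of 'map' as kptrs, then take them
 * back out and free them. The first unexpected state is reported via err.
 */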
static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch,
					     unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

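	/* Allocation pass: each iteration creates a fresh object and stores
	 * it in slot i, which must have been empty beforehand.
	 */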
	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
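	/* Free pass: exchange each kptr back out of the map and drop it;
	 * every slot is expected to hold an object from the pass above.
	 */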
	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

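/* Cast the per-size array map to the generic struct bpf_map pointer expected
 * by batch_alloc_free().
 */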
#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx)

DEFINE_ARRAY_WITH_KPTR(8);
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

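/* Runs on entry to the nanosleep syscall, but only for the process whose PID
 * was written into 'pid' by the userspace test.
 */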
SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int test_bpf_mem_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* For each object size, allocate a batch of objects to trigger cache
	 * refilling, then free the whole batch to trigger batched freeing.
	 */
	CALL_BATCH_ALLOC_FREE(8, 128, 0);
	CALL_BATCH_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_ALLOC_FREE(512, 64, 8);
	CALL_BATCH_ALLOC_FREE(1024, 32, 9);
	CALL_BATCH_ALLOC_FREE(2048, 16, 10);
	CALL_BATCH_ALLOC_FREE(4096, 8, 11);

	return 0;
}