// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google */
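
/*
 * Tests for BTF-typed kernel symbols (__ksym): address relocation and
 * per-CPU variable access, rejection of a program that skips the
 * required NULL check, and __weak ksyms through both the regular and
 * light (lskel) skeletons.
 */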

#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "test_ksyms_btf.skel.h"
#include "test_ksyms_btf_null_check.skel.h"
#include "test_ksyms_weak.skel.h"
#include "test_ksyms_weak.lskel.h"

/* referenced by the CHECK() macros from test_progs.h */
static int duration;
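
/*
 * Typed ksyms: the addresses the BPF program reports for runqueues and
 * bpf_prog_active must match what kallsyms_find() returns, and the
 * per-CPU dereferences (rq->cpu, bpf_prog_active) must look sane.
 */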
static void test_basic(void)
{
	__u64 runqueues_addr, bpf_prog_active_addr;
	__u32 this_rq_cpu;
	int this_bpf_prog_active;
	struct test_ksyms_btf *skel = NULL;
	struct test_ksyms_btf__data *data;
	int err;

	err = kallsyms_find("runqueues", &runqueues_addr);
	if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
		return;
	if (CHECK(err == -ENOENT, "ksym_find", "symbol 'runqueues' not found\n"))
		return;

	err = kallsyms_find("bpf_prog_active", &bpf_prog_active_addr);
	if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
		return;
	if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_prog_active' not found\n"))
		return;

	skel = test_ksyms_btf__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
		goto cleanup;

	err = test_ksyms_btf__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	data = skel->data;
	CHECK(data->out__runqueues_addr != runqueues_addr, "runqueues_addr",
	      "got %llu, exp %llu\n",
	      (unsigned long long)data->out__runqueues_addr,
	      (unsigned long long)runqueues_addr);
	CHECK(data->out__bpf_prog_active_addr != bpf_prog_active_addr, "bpf_prog_active_addr",
	      "got %llu, exp %llu\n",
	      (unsigned long long)data->out__bpf_prog_active_addr,
	      (unsigned long long)bpf_prog_active_addr);

	CHECK(data->out__rq_cpu == -1, "rq_cpu",
	      "got %u, exp != -1\n", data->out__rq_cpu);
	CHECK(data->out__bpf_prog_active < 0, "bpf_prog_active",
	      "got %d, exp >= 0\n", data->out__bpf_prog_active);
	CHECK(data->out__cpu_0_rq_cpu != 0, "cpu_rq(0)->cpu",
	      "got %u, exp 0\n", data->out__cpu_0_rq_cpu);

	this_rq_cpu = data->out__this_rq_cpu;
	CHECK(this_rq_cpu != data->out__rq_cpu, "this_rq_cpu",
	      "got %u, exp %u\n", this_rq_cpu, data->out__rq_cpu);

	this_bpf_prog_active = data->out__this_bpf_prog_active;
	CHECK(this_bpf_prog_active != data->out__bpf_prog_active, "this_bpf_prog_active",
	      "got %d, exp %d\n", this_bpf_prog_active,
	      data->out__bpf_prog_active);

cleanup:
	test_ksyms_btf__destroy(skel);
}
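
/*
 * A program that uses a ksym pointer without the required NULL check
 * must fail to load.
 */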
static void test_null_check(void)
{
	struct test_ksyms_btf_null_check *skel;

	skel = test_ksyms_btf_null_check__open_and_load();
	CHECK(skel, "skel_open", "unexpected load of a prog missing null check\n");

	test_ksyms_btf_null_check__destroy(skel);
}
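
/*
 * __weak ksyms: symbols that exist must resolve and be readable, while
 * non-existent ones must resolve to zero without breaking the load.
 */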
static void test_weak_syms(void)
{
	struct test_ksyms_weak *skel;
	struct test_ksyms_weak__data *data;
	int err;

	skel = test_ksyms_weak__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
		return;

	err = test_ksyms_weak__attach(skel);
	if (!ASSERT_OK(err, "test_ksyms_weak__attach"))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	data = skel->data;
	ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
	ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
	ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
	ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");

cleanup:
	test_ksyms_weak__destroy(skel);
}
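
/* Same as test_weak_syms(), but going through the light skeleton. */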
static void test_weak_syms_lskel(void)
{
	struct test_ksyms_weak_lskel *skel;
	struct test_ksyms_weak_lskel__data *data;
	int err;

	skel = test_ksyms_weak_lskel__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load"))
		return;

	err = test_ksyms_weak_lskel__attach(skel);
	if (!ASSERT_OK(err, "test_ksyms_weak_lskel__attach"))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	data = skel->data;
	ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
	ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
	ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
	ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");

cleanup:
	test_ksyms_weak_lskel__destroy(skel);
}
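
/*
 * Entry point: the typed-ksym tests rely on per-CPU kernel variables,
 * so skip everything unless the kernel BTF has a .data..percpu DATASEC.
 */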
void test_ksyms_btf(void)
{
	int percpu_datasec;
	struct btf *btf;

	btf = libbpf_find_kernel_btf();
	if (!ASSERT_OK_PTR(btf, "btf_exists"))
		return;

	percpu_datasec = btf__find_by_name_kind(btf, ".data..percpu",
						BTF_KIND_DATASEC);
	btf__free(btf);
	if (percpu_datasec < 0) {
		printf("%s:SKIP:no PERCPU DATASEC in kernel btf\n",
		       __func__);
		test__skip();
		return;
	}

	if (test__start_subtest("basic"))
		test_basic();

	if (test__start_subtest("null_check"))
		test_null_check();

	if (test__start_subtest("weak_ksyms"))
		test_weak_syms();

	if (test__start_subtest("weak_ksyms_lskel"))
		test_weak_syms_lskel();
}