1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Test module for unwind_for_each_frame
4 */
5
6 #include <kunit/test.h>
7 #include <asm/unwind.h>
8 #include <linux/completion.h>
9 #include <linux/kallsyms.h>
10 #include <linux/kthread.h>
11 #include <linux/ftrace.h>
12 #include <linux/module.h>
13 #include <linux/timer.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16 #include <linux/kprobes.h>
17 #include <linux/wait.h>
18 #include <asm/irq.h>
19
20 static struct kunit *current_test;
21
22 #define BT_BUF_SIZE (PAGE_SIZE * 4)
23
24 static bool force_bt;
25 module_param_named(backtrace, force_bt, bool, 0444);
26 MODULE_PARM_DESC(backtrace, "print backtraces for all tests");
27
28 /*
29 * To avoid printk line limit split backtrace by lines
30 */
print_backtrace(char * bt)31 static void print_backtrace(char *bt)
32 {
33 char *p;
34
35 while (true) {
36 p = strsep(&bt, "\n");
37 if (!p)
38 break;
39 kunit_err(current_test, "%s\n", p);
40 }
41 }
42
43 /*
44 * Calls unwind_for_each_frame(task, regs, sp) and verifies that the result
45 * contains unwindme_func2 followed by unwindme_func1.
46 */
test_unwind(struct task_struct * task,struct pt_regs * regs,unsigned long sp)47 static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
48 unsigned long sp)
49 {
50 int frame_count, prev_is_func2, seen_func2_func1, seen_arch_rethook_trampoline;
51 const int max_frames = 128;
52 struct unwind_state state;
53 size_t bt_pos = 0;
54 int ret = 0;
55 char *bt;
56
57 bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC);
58 if (!bt) {
59 kunit_err(current_test, "failed to allocate backtrace buffer\n");
60 return -ENOMEM;
61 }
62 /* Unwind. */
63 frame_count = 0;
64 prev_is_func2 = 0;
65 seen_func2_func1 = 0;
66 seen_arch_rethook_trampoline = 0;
67 unwind_for_each_frame(&state, task, regs, sp) {
68 unsigned long addr = unwind_get_return_address(&state);
69 char sym[KSYM_SYMBOL_LEN];
70
71 if (frame_count++ == max_frames)
72 break;
73 if (state.reliable && !addr) {
74 kunit_err(current_test, "unwind state reliable but addr is 0\n");
75 ret = -EINVAL;
76 break;
77 }
78 sprint_symbol(sym, addr);
79 if (bt_pos < BT_BUF_SIZE) {
80 bt_pos += snprintf(bt + bt_pos, BT_BUF_SIZE - bt_pos,
81 state.reliable ? " [%-7s%px] %pSR\n" :
82 "([%-7s%px] %pSR)\n",
83 stack_type_name(state.stack_info.type),
84 (void *)state.sp, (void *)state.ip);
85 if (bt_pos >= BT_BUF_SIZE)
86 kunit_err(current_test, "backtrace buffer is too small\n");
87 }
88 frame_count += 1;
89 if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
90 seen_func2_func1 = 1;
91 prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
92 if (str_has_prefix(sym, "arch_rethook_trampoline+0x0/"))
93 seen_arch_rethook_trampoline = 1;
94 }
95
96 /* Check the results. */
97 if (unwind_error(&state)) {
98 kunit_err(current_test, "unwind error\n");
99 ret = -EINVAL;
100 }
101 if (!seen_func2_func1) {
102 kunit_err(current_test, "unwindme_func2 and unwindme_func1 not found\n");
103 ret = -EINVAL;
104 }
105 if (frame_count == max_frames) {
106 kunit_err(current_test, "Maximum number of frames exceeded\n");
107 ret = -EINVAL;
108 }
109 if (seen_arch_rethook_trampoline) {
110 kunit_err(current_test, "arch_rethook_trampoline+0x0 in unwinding results\n");
111 ret = -EINVAL;
112 }
113 if (ret || force_bt)
114 print_backtrace(bt);
115 kfree(bt);
116 return ret;
117 }
118
/* State of the task being unwound. */
struct unwindme {
	int flags;			/* UWM_* flags selecting the test variant */
	int ret;			/* unwind result stored by the handler/task */
	struct task_struct *task;	/* task to unwind (UWM_THREAD), else NULL */
	struct completion task_ready;	/* signaled once the task reached the unwind point */
	wait_queue_head_t task_wq;	/* UWM_THREAD task sleeps here until parked */
	unsigned long sp;		/* stack pointer captured in unwindme_func3/4 */
};
128
129 static struct unwindme *unwindme;
130
/* Values of unwindme.flags. */
#define UWM_DEFAULT		0x0
#define UWM_THREAD		0x1	/* Unwind a separate task. */
#define UWM_REGS		0x2	/* Pass regs to test_unwind(). */
#define UWM_SP			0x4	/* Pass sp to test_unwind(). */
#define UWM_CALLER		0x8	/* Unwind starting from caller. */
#define UWM_SWITCH_STACK	0x10	/* Use call_on_stack. */
#define UWM_IRQ			0x20	/* Unwind from irq context. */
#define UWM_PGM			0x40	/* Unwind from program check handler */
#define UWM_KPROBE_ON_FTRACE	0x80	/* Unwind from kprobe handler called via ftrace. */
#define UWM_FTRACE		0x100	/* Unwind from ftrace handler. */
#define UWM_KRETPROBE		0x200	/* Unwind through kretprobed function. */
#define UWM_KRETPROBE_HANDLER	0x400	/* Unwind from kretprobe handler. */
144
fake_pt_regs(void)145 static __always_inline struct pt_regs fake_pt_regs(void)
146 {
147 struct pt_regs regs;
148
149 memset(®s, 0, sizeof(regs));
150 regs.gprs[15] = current_stack_pointer;
151
152 asm volatile(
153 "basr %[psw_addr],0\n"
154 : [psw_addr] "=d" (regs.psw.addr));
155 return regs;
156 }
157
kretprobe_ret_handler(struct kretprobe_instance * ri,struct pt_regs * regs)158 static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
159 {
160 struct unwindme *u = unwindme;
161
162 if (!(u->flags & UWM_KRETPROBE_HANDLER))
163 return 0;
164
165 u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
166 (u->flags & UWM_SP) ? u->sp : 0);
167
168 return 0;
169 }
170
test_unwind_kretprobed_func(struct unwindme * u)171 static noinline notrace int test_unwind_kretprobed_func(struct unwindme *u)
172 {
173 struct pt_regs regs;
174
175 if (!(u->flags & UWM_KRETPROBE))
176 return 0;
177
178 regs = fake_pt_regs();
179 return test_unwind(NULL, (u->flags & UWM_REGS) ? ®s : NULL,
180 (u->flags & UWM_SP) ? u->sp : 0);
181 }
182
/*
 * Extra call level around the kretprobed function; noinline preserves a
 * distinct caller stack frame for the unwinder to walk through.
 */
static noinline int test_unwind_kretprobed_func_caller(struct unwindme *u)
{
	return test_unwind_kretprobed_func(u);
}
187
/*
 * Registers a kretprobe on test_unwind_kretprobed_func, triggers it via a
 * caller wrapper, then unregisters it. Covers both UWM_KRETPROBE (unwind
 * from inside the probed function) and UWM_KRETPROBE_HANDLER (unwind from
 * the return handler).
 */
static int test_unwind_kretprobe(struct unwindme *u)
{
	int ret;
	struct kretprobe my_kretprobe;

	if (!IS_ENABLED(CONFIG_KPROBES))
		kunit_skip(current_test, "requires CONFIG_KPROBES");

	u->ret = -1; /* make sure kprobe is called */
	unwindme = u;

	memset(&my_kretprobe, 0, sizeof(my_kretprobe));
	my_kretprobe.handler = kretprobe_ret_handler;
	my_kretprobe.maxactive = 1;
	my_kretprobe.kp.addr = (kprobe_opcode_t *)test_unwind_kretprobed_func;

	ret = register_kretprobe(&my_kretprobe);

	if (ret < 0) {
		kunit_err(current_test, "register_kretprobe failed %d\n", ret);
		return -EINVAL;
	}

	ret = test_unwind_kretprobed_func_caller(u);
	unregister_kretprobe(&my_kretprobe);
	unwindme = NULL;
	/* For the handler variant the result was stored by kretprobe_ret_handler. */
	if (u->flags & UWM_KRETPROBE_HANDLER)
		ret = u->ret;
	return ret;
}
218
kprobe_pre_handler(struct kprobe * p,struct pt_regs * regs)219 static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs)
220 {
221 struct unwindme *u = unwindme;
222
223 u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
224 (u->flags & UWM_SP) ? u->sp : 0);
225 return 0;
226 }
227
228 extern const char test_unwind_kprobed_insn[];
229
/*
 * Probe target. A kprobe-on-ftrace attaches at the function entry; the
 * test_unwind_kprobed_insn label marks the second nopr so a regular
 * kprobe can attach past the ftrace patch site (see test_unwind_kprobe()).
 */
static noinline void test_unwind_kprobed_func(void)
{
	asm volatile(
		"	nopr	%%r7\n"
		"test_unwind_kprobed_insn:\n"
		"	nopr	%%r7\n"
		:);
}
238
/*
 * Registers a kprobe (either on the ftrace patch site or on the marked
 * instruction) and fires it by calling the probed function; the unwind
 * runs in kprobe_pre_handler().
 */
static int test_unwind_kprobe(struct unwindme *u)
{
	struct kprobe kp;
	int ret;

	if (!IS_ENABLED(CONFIG_KPROBES))
		kunit_skip(current_test, "requires CONFIG_KPROBES");
	if (!IS_ENABLED(CONFIG_KPROBES_ON_FTRACE) && u->flags & UWM_KPROBE_ON_FTRACE)
		kunit_skip(current_test, "requires CONFIG_KPROBES_ON_FTRACE");

	u->ret = -1; /* make sure kprobe is called */
	unwindme = u;
	memset(&kp, 0, sizeof(kp));
	kp.pre_handler = kprobe_pre_handler;
	/* On-ftrace probes go at function entry, plain ones at the marked insn. */
	kp.addr = u->flags & UWM_KPROBE_ON_FTRACE ?
			(kprobe_opcode_t *)test_unwind_kprobed_func :
			(kprobe_opcode_t *)test_unwind_kprobed_insn;
	ret = register_kprobe(&kp);
	if (ret < 0) {
		kunit_err(current_test, "register_kprobe failed %d\n", ret);
		return -EINVAL;
	}

	test_unwind_kprobed_func();
	unregister_kprobe(&kp);
	unwindme = NULL;
	return u->ret;
}
267
/*
 * ftrace handler: runs the unwinder from ftrace context. gpr2 carries the
 * traced function's first argument (the struct unwindme pointer) —
 * presumably per the s390 calling convention; confirm against the ABI.
 */
static void notrace __used test_unwind_ftrace_handler(unsigned long ip,
						      unsigned long parent_ip,
						      struct ftrace_ops *fops,
						      struct ftrace_regs *fregs)
{
	struct unwindme *u = (struct unwindme *)fregs->regs.gprs[2];

	u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? &fregs->regs : NULL,
			     (u->flags & UWM_SP) ? u->sp : 0);
}
278
/*
 * Traced function. READ_ONCE on the argument — presumably to keep the
 * compiler from optimizing the access (and thus the call) away; confirm.
 */
static noinline int test_unwind_ftraced_func(struct unwindme *u)
{
	return READ_ONCE(u)->ret;
}
283
/*
 * Registers an ftrace handler filtered to test_unwind_ftraced_func, calls
 * the traced function, then unregisters and removes the filter.
 */
static int test_unwind_ftrace(struct unwindme *u)
{
	int ret;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops *fops;

	/*
	 * kunit-managed allocation, freed automatically at test end.
	 * NOTE(review): return value is not checked; kunit_kzalloc can
	 * return NULL — confirm whether a check should be added.
	 */
	fops = kunit_kzalloc(current_test, sizeof(*fops), GFP_KERNEL);
	fops->func = test_unwind_ftrace_handler;
	fops->flags = FTRACE_OPS_FL_DYNAMIC |
		     FTRACE_OPS_FL_RECURSION |
		     FTRACE_OPS_FL_SAVE_REGS |
		     FTRACE_OPS_FL_PERMANENT;
#else
	kunit_skip(current_test, "requires CONFIG_DYNAMIC_FTRACE");
#endif

	ret = ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 0, 0);
	if (ret) {
		kunit_err(current_test, "failed to set ftrace filter (%d)\n", ret);
		return -1;
	}

	ret = register_ftrace_function(fops);
	if (!ret) {
		ret = test_unwind_ftraced_func(u);
		unregister_ftrace_function(fops);
	} else {
		kunit_err(current_test, "failed to register ftrace handler (%d)\n", ret);
	}

	/* remove=1: drop the filter entry again. */
	ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 1, 0);
	return ret;
}
317
318 /* This function may or may not appear in the backtrace. */
unwindme_func4(struct unwindme * u)319 static noinline int unwindme_func4(struct unwindme *u)
320 {
321 if (!(u->flags & UWM_CALLER))
322 u->sp = current_frame_address();
323 if (u->flags & UWM_THREAD) {
324 complete(&u->task_ready);
325 wait_event(u->task_wq, kthread_should_park());
326 kthread_parkme();
327 return 0;
328 } else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) {
329 return test_unwind_kprobe(u);
330 } else if (u->flags & (UWM_KRETPROBE | UWM_KRETPROBE_HANDLER)) {
331 return test_unwind_kretprobe(u);
332 } else if (u->flags & UWM_FTRACE) {
333 return test_unwind_ftrace(u);
334 } else {
335 struct pt_regs regs = fake_pt_regs();
336
337 return test_unwind(NULL,
338 (u->flags & UWM_REGS) ? ®s : NULL,
339 (u->flags & UWM_SP) ? u->sp : 0);
340 }
341 }
342
/* This function may or may not appear in the backtrace. */
static noinline int unwindme_func3(struct unwindme *u)
{
	/*
	 * Capture the caller-side sp; it survives only for UWM_CALLER
	 * variants (unwindme_func4 overwrites it otherwise).
	 */
	u->sp = current_frame_address();
	return unwindme_func4(u);
}
349
/* This function must appear in the backtrace. */
static noinline int unwindme_func2(struct unwindme *u)
{
	unsigned long flags;
	int rc;

	if (u->flags & UWM_SWITCH_STACK) {
		/*
		 * Block interrupts and machine checks while running on the
		 * nodat stack, restoring both afterwards in reverse order.
		 */
		local_irq_save(flags);
		local_mcck_disable();
		rc = call_on_stack(1, S390_lowcore.nodat_stack,
				   int, unwindme_func3, struct unwindme *, u);
		local_mcck_enable();
		local_irq_restore(flags);
		return rc;
	} else {
		return unwindme_func3(u);
	}
}
368
/*
 * This function must follow unwindme_func2 in the backtrace.
 * void * signature so it can double as a kthread entry point
 * (see test_unwind_task()).
 */
static noinline int unwindme_func1(void *u)
{
	return unwindme_func2((struct unwindme *)u);
}
374
/* Timer callback: runs the unwind test from irq context (UWM_IRQ). */
static void unwindme_timer_fn(struct timer_list *unused)
{
	struct unwindme *u = READ_ONCE(unwindme);

	if (u) {
		/* Claim the request so a re-fired timer cannot run it twice. */
		unwindme = NULL;
		u->task = NULL;
		u->ret = unwindme_func1(u);
		complete(&u->task_ready);
	}
}
386
387 static struct timer_list unwind_timer;
388
/* Arms a one-shot timer and waits for unwindme_timer_fn to run the test. */
static int test_unwind_irq(struct unwindme *u)
{
	unwindme = u;
	init_completion(&u->task_ready);
	timer_setup(&unwind_timer, unwindme_timer_fn, 0);
	mod_timer(&unwind_timer, jiffies + 1);
	wait_for_completion(&u->task_ready);
	return u->ret;
}
398
/* Spawns a task and passes it to test_unwind(). */
static int test_unwind_task(struct unwindme *u)
{
	struct task_struct *task;
	int ret;

	/* Initialize thread-related fields. */
	init_completion(&u->task_ready);
	init_waitqueue_head(&u->task_wq);

	/*
	 * Start the task and wait until it reaches unwindme_func4() and sleeps
	 * in (task_ready, unwind_done] range.
	 */
	task = kthread_run(unwindme_func1, u, "%s", __func__);
	if (IS_ERR(task)) {
		kunit_err(current_test, "kthread_run() failed\n");
		return PTR_ERR(task);
	}
	/*
	 * Make sure task reaches unwindme_func4 before parking it,
	 * we might park it before kthread function has been executed otherwise
	 */
	wait_for_completion(&u->task_ready);
	kthread_park(task);
	/* Unwind the parked task's stack. */
	ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? u->sp : 0);
	kthread_stop(task);
	return ret;
}
429
/* One parameterized test case: a flag combination plus its printable name. */
struct test_params {
	int flags;	/* bitwise OR of UWM_* flags */
	char *name;	/* stringified flags, used as the KUnit case description */
};
434
/*
 * Create required parameter list for tests
 */
#define TEST_WITH_FLAGS(f) { .flags = f, .name = #f }
/* Every UWM_* flag combination exercised by the parameterized test. */
static const struct test_params param_list[] = {
	TEST_WITH_FLAGS(UWM_DEFAULT),
	TEST_WITH_FLAGS(UWM_SP),
	TEST_WITH_FLAGS(UWM_REGS),
	TEST_WITH_FLAGS(UWM_SWITCH_STACK),
	TEST_WITH_FLAGS(UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_CALLER | UWM_SP),
	TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK),
	TEST_WITH_FLAGS(UWM_THREAD),
	TEST_WITH_FLAGS(UWM_THREAD | UWM_SP),
	TEST_WITH_FLAGS(UWM_THREAD | UWM_CALLER | UWM_SP),
	TEST_WITH_FLAGS(UWM_IRQ),
	TEST_WITH_FLAGS(UWM_IRQ | UWM_SWITCH_STACK),
	TEST_WITH_FLAGS(UWM_IRQ | UWM_SP),
	TEST_WITH_FLAGS(UWM_IRQ | UWM_REGS),
	TEST_WITH_FLAGS(UWM_IRQ | UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP),
	TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK),
	TEST_WITH_FLAGS(UWM_PGM),
	TEST_WITH_FLAGS(UWM_PGM | UWM_SP),
	TEST_WITH_FLAGS(UWM_PGM | UWM_REGS),
	TEST_WITH_FLAGS(UWM_PGM | UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE),
	TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP),
	TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_REGS),
	TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_FTRACE),
	TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP),
	TEST_WITH_FLAGS(UWM_FTRACE | UWM_REGS),
	TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_KRETPROBE),
	TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP),
	TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS),
	TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS),
	TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER),
	TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP),
	TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_REGS),
	TEST_WITH_FLAGS(UWM_KRETPROBE_HANDLER | UWM_SP | UWM_REGS),
};
480
/*
 * Parameter description generator: required for KUNIT_ARRAY_PARAM()
 */
static void get_desc(const struct test_params *params, char *desc)
{
	strscpy(desc, params->name, KUNIT_PARAM_DESC_SIZE);
}
488
/*
 * Create test_unwind_gen_params, the parameter generator iterating
 * over param_list with get_desc() descriptions.
 */
KUNIT_ARRAY_PARAM(test_unwind, param_list, get_desc);
493
test_unwind_flags(struct kunit * test)494 static void test_unwind_flags(struct kunit *test)
495 {
496 struct unwindme u;
497 const struct test_params *params;
498
499 current_test = test;
500 params = (const struct test_params *)test->param_value;
501 u.flags = params->flags;
502 if (u.flags & UWM_THREAD)
503 KUNIT_EXPECT_EQ(test, 0, test_unwind_task(&u));
504 else if (u.flags & UWM_IRQ)
505 KUNIT_EXPECT_EQ(test, 0, test_unwind_irq(&u));
506 else
507 KUNIT_EXPECT_EQ(test, 0, unwindme_func1(&u));
508 }
509
/* Single parameterized case, expanded over param_list. */
static struct kunit_case unwind_test_cases[] = {
	KUNIT_CASE_PARAM(test_unwind_flags, test_unwind_gen_params),
	{}
};
514
/* KUnit suite definition, registered via kunit_test_suites() below. */
static struct kunit_suite test_unwind_suite = {
	.name = "test_unwind",
	.test_cases = unwind_test_cases,
};
519
520 kunit_test_suites(&test_unwind_suite);
521
522 MODULE_LICENSE("GPL");
523