xref: /openbmc/linux/arch/s390/kernel/dumpstack.c (revision e6badee9)
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack dumping functions
 *
 *  Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>
#include <asm/unwind.h>

const char *stack_type_name(enum stack_type type)
{
	switch (type) {
	case STACK_TYPE_TASK:
		return "task";
	case STACK_TYPE_IRQ:
		return "irq";
	case STACK_TYPE_NODAT:
		return "nodat";
	case STACK_TYPE_RESTART:
		return "restart";
	default:
		return "unknown";
	}
}
EXPORT_SYMBOL_GPL(stack_type_name);

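/*
 * in_stack() is the common range check used by the helpers below: it
 * reports whether @sp falls within the THREAD_SIZE sized stack that
 * starts at @stack and, if so, records the stack type and boundaries
 * in @info for the unwinder.
 */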
static inline bool in_stack(unsigned long sp, struct stack_info *info,
			    enum stack_type type, unsigned long stack)
{
	if (sp < stack || sp >= stack + THREAD_SIZE)
		return false;
	info->type = type;
	info->begin = stack;
	info->end = stack + THREAD_SIZE;
	return true;
}

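/*
 * The in_*_stack() helpers classify @sp against the task stack and the
 * per-CPU interrupt (async), nodat, machine check and restart stacks.
 * The lowcore entries reference the initial stack pointer of each
 * per-CPU stack, so STACK_INIT_OFFSET is subtracted to obtain the
 * stack base expected by in_stack().
 */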
static bool in_task_stack(unsigned long sp, struct task_struct *task,
			  struct stack_info *info)
{
	unsigned long stack = (unsigned long)task_stack_page(task);

	return in_stack(sp, info, STACK_TYPE_TASK, stack);
}

static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;

	return in_stack(sp, info, STACK_TYPE_IRQ, stack);
}

static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;

	return in_stack(sp, info, STACK_TYPE_NODAT, stack);
}

static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;

	return in_stack(sp, info, STACK_TYPE_MCCK, stack);
}

static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET;

	return in_stack(sp, info, STACK_TYPE_RESTART, stack);
}

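/*
 * get_stack_info() - classify a stack pointer for the unwinder.
 *
 * Returns 0 and fills @info if @sp points into a known stack, otherwise
 * returns -EINVAL and sets STACK_TYPE_UNKNOWN. The per-CPU stacks are
 * only checked for the current task, since they belong to the CPU doing
 * the unwinding. @visit_mask records which stack types have already been
 * walked, so a corrupted frame chain cannot make the unwinder bounce
 * between stacks forever.
 */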
int get_stack_info(unsigned long sp, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask)
{
	if (!sp)
		goto unknown;

	/* Sanity check: the ABI requires SP to be aligned to 8 bytes. */
	if (sp & 0x7)
		goto unknown;

	/* Check per-task stack */
	if (in_task_stack(sp, task, info))
		goto recursion_check;

	if (task != current)
		goto unknown;

	/* Check per-cpu stacks */
	if (!in_irq_stack(sp, info) &&
	    !in_nodat_stack(sp, info) &&
	    !in_restart_stack(sp, info) &&
	    !in_mcck_stack(sp, info))
		goto unknown;

recursion_check:
	/*
	 * Make sure we don't iterate through any given stack more than once.
	 * If it comes up a second time then there's something wrong going on:
	 * just break out and report an unknown stack type.
	 */
	if (*visit_mask & (1UL << info->type))
		goto unknown;
	*visit_mask |= 1UL << info->type;
	return 0;
unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}

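/*
 * show_stack() - print a call trace for @task (or the current task if
 * @task is NULL) at the given printk log level. Frames the unwinder
 * could not verify as reliable are printed in parentheses.
 */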
void show_stack(struct task_struct *task, unsigned long *stack,
		const char *loglvl)
{
	struct unwind_state state;

	printk("%sCall Trace:\n", loglvl);
	unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
		printk(state.reliable ? "%s [<%016lx>] %pSR \n" :
					"%s([<%016lx>] %pSR)\n",
		       loglvl, state.ip, (void *) state.ip);
	debug_show_held_locks(task ? : current);
}

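/*
 * Print the breaking-event address recorded in pt_regs, i.e. the source
 * of the last branch taken before the event. A user space address is
 * resolved to its mapping via print_vma_addr(), a kernel address is
 * printed as a symbol.
 */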
static void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->last_break);
	if (user_mode(regs)) {
		print_vma_addr(KERN_CONT, regs->last_break);
		pr_cont("\n");
	} else {
		pr_cont("%pSR\n", (void *)regs->last_break);
	}
}

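/*
 * Dump the PSW (raw and decoded bit by bit) and all 16 general purpose
 * registers, then let show_code() disassemble the code around the PSW
 * address.
 */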
void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		pr_cont(" (%pSR)", (void *)regs->psw.addr);
	pr_cont("\n");
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
	       psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *) regs->gprs[15], KERN_DEFAULT);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

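/*
 * die() - terminal oops handler. Output is serialized via die_lock so
 * that concurrent oopses on different CPUs do not interleave. The
 * interruption code, instruction length code (ilc) and an oops counter
 * are printed, followed by modules, registers and stack. Depending on
 * context and panic_on_oops the function either panics or ends the
 * current task with make_task_dead(); it never returns.
 */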
void __noreturn die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	pr_cont("PREEMPT ");
#elif defined(CONFIG_PREEMPT_RT)
	pr_cont("PREEMPT_RT ");
#endif
	pr_cont("SMP ");
	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC");
	pr_cont("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	make_task_dead(SIGSEGV);
}
225