// SPDX-License-Identifier: GPL-2.0
/*
 * Stack dumping functions
 *
 * Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>
#include <asm/unwind.h>

const char *stack_type_name(enum stack_type type)
{
	switch (type) {
	case STACK_TYPE_TASK:
		return "task";
	case STACK_TYPE_IRQ:
		return "irq";
	case STACK_TYPE_NODAT:
		return "nodat";
	case STACK_TYPE_RESTART:
		return "restart";
	default:
		return "unknown";
	}
}
EXPORT_SYMBOL_GPL(stack_type_name);

static inline bool in_stack(unsigned long sp, struct stack_info *info,
			    enum stack_type type, unsigned long low,
			    unsigned long high)
{
	if (sp < low || sp >= high)
		return false;
	info->type = type;
	info->begin = low;
	info->end = high;
	return true;
}

static bool in_task_stack(unsigned long sp, struct task_struct *task,
			  struct stack_info *info)
{
	unsigned long stack;

	stack = (unsigned long) task_stack_page(task);
	return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
}

static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long frame_size, top;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	top = S390_lowcore.async_stack + frame_size;
	return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
}

static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long frame_size, top;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	top = S390_lowcore.nodat_stack + frame_size;
	return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
}

static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long frame_size, top;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	top = S390_lowcore.restart_stack + frame_size;
	return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
}

int get_stack_info(unsigned long sp, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask)
{
	if (!sp)
		goto unknown;

	/* Sanity check: ABI requires SP to be aligned 8 bytes. */
	if (sp & 0x7)
		goto unknown;

	/* Check per-task stack */
	if (in_task_stack(sp, task, info))
		goto recursion_check;

	if (task != current)
		goto unknown;

	/* Check per-cpu stacks */
	if (!in_irq_stack(sp, info) &&
	    !in_nodat_stack(sp, info) &&
	    !in_restart_stack(sp, info))
		goto unknown;

recursion_check:
	/*
	 * Make sure we don't iterate through any given stack more than once.
	 * If it comes up a second time then there's something wrong going on:
	 * just break out and report an unknown stack type.
	 */
	if (*visit_mask & (1UL << info->type))
		goto unknown;
	*visit_mask |= 1UL << info->type;
	return 0;
unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}

void show_stack(struct task_struct *task, unsigned long *stack)
{
	struct unwind_state state;

	printk("Call Trace:\n");
	unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
		printk(state.reliable ? " [<%016lx>] %pSR \n" :
					"([<%016lx>] %pSR)\n",
		       state.ip, (void *) state.ip);
	debug_show_held_locks(task ? : current);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}

void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		pr_cont(" (%pSR)", (void *)regs->psw.addr);
	pr_cont("\n");
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
	       psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	pr_cont("PREEMPT ");
#elif defined(CONFIG_PREEMPT_RT)
	pr_cont("PREEMPT_RT ");
#endif
	pr_cont("SMP ");
	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC");
	pr_cont("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
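
/*
 * Illustrative sketch, not part of the upstream file: a minimal example of
 * how get_stack_info() and stack_type_name() above are typically consumed
 * together. The helper name example_report_stack() and the DUMPSTACK_EXAMPLE
 * guard are hypothetical and exist only for demonstration; the block is
 * compiled out by default.
 */
#ifdef DUMPSTACK_EXAMPLE
static void example_report_stack(unsigned long sp)
{
	struct stack_info info = { };
	unsigned long visit_mask = 0;

	/* Classify sp against the task and per-cpu stacks of current. */
	if (get_stack_info(sp, current, &info, &visit_mask))
		return;
	pr_info("sp %016lx is on the %s stack [%016lx..%016lx)\n",
		sp, stack_type_name(info.type), info.begin, info.end);
}
#endif /* DUMPSTACK_EXAMPLE */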