#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>

#include <asm/traps.h>
#include <asm/kdebug.h>

typedef bool (*ex_handler_t)(const struct exception_table_entry *,
			     struct pt_regs *, int);

/* The fixup and handler fields are self-relative offsets; adding the
 * field's own address back recovers the absolute target. */
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}
static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
	return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}

bool ex_handler_default(const struct exception_table_entry *fixup,
			struct pt_regs *regs, int trapnr)
{
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_default);

bool ex_handler_fault(const struct exception_table_entry *fixup,
		      struct pt_regs *regs, int trapnr)
{
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = trapnr;
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fault);

bool ex_handler_ext(const struct exception_table_entry *fixup,
		    struct pt_regs *regs, int trapnr)
{
	/* Special hack for uaccess_err */
	current->thread.uaccess_err = 1;
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_ext);

bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
			     struct pt_regs *regs, int trapnr)
{
	if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the read succeeded and returned 0. */
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = 0;
	regs->dx = 0;
	return true;
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);

bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
			     struct pt_regs *regs, int trapnr)
{
	if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
			 (unsigned int)regs->cx, (unsigned int)regs->dx,
			 (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the write succeeded. */
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);

bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
			 struct pt_regs *regs, int trapnr)
{
	if (static_cpu_has(X86_BUG_NULL_SEG))
		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
	asm volatile ("mov %0, %%fs" : : "rm" (0));
	return ex_handler_default(fixup, regs, trapnr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);

bool ex_has_fault_handler(unsigned long ip)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

	e = search_exception_tables(ip);
	if (!e)
		return false;
	handler = ex_fixup_handler(e);

	return handler == ex_handler_fault;
}

int fixup_exception(struct pt_regs *regs, int trapnr)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

#ifdef CONFIG_PNPBIOS
	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
		extern u32 pnp_bios_is_utter_crap;
		pnp_bios_is_utter_crap = 1;
		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
		__asm__ volatile(
			"movl %0, %%esp\n\t"
			"jmp *%1\n\t"
			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
		panic("do_trap: can't hit this");
	}
#endif

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;

	handler = ex_fixup_handler(e);
	return handler(e, regs, trapnr);
}

extern unsigned int early_recursion_flag;

/* Restricted version used during very early boot */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
	/* Ignore early NMIs. */
	if (trapnr == X86_TRAP_NMI)
		return;

	if (early_recursion_flag > 2)
		goto halt_loop;

	/*
	 * Old CPUs leave the high bits of CS on the stack
	 * undefined.  I'm not sure which CPUs do this, but at least
	 * the 486 DX works this way.
	 */
	if ((regs->cs & 0xFFFF) != __KERNEL_CS)
		goto fail;

	/*
	 * The full exception fixup machinery is available as soon as
	 * the early IDT is loaded.  This means that it is the
	 * responsibility of extable users to either function correctly
	 * when handlers are invoked early or to simply avoid causing
	 * exceptions before they're ready to handle them.
	 *
	 * This is better than filtering which handlers can be used,
	 * because refusing to call a handler here is guaranteed to
	 * result in a hard-to-debug panic.
	 *
	 * Keep in mind that not all vectors actually get here.  Early
	 * page faults, for example, are special.
	 */
	if (fixup_exception(regs, trapnr))
		return;

	if (fixup_bug(regs, trapnr))
		return;

fail:
	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
		     regs->orig_ax, read_cr2());

	show_regs(regs);

halt_loop:
	while (true)
		halt();
}
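
/*
 * A minimal sketch of where these entries come from (the real definitions
 * live in asm/extable.h and asm/asm.h, and details can differ between
 * kernel versions): each __ex_table entry is three signed 32-bit fields,
 * and each field stores its target as an offset from the field's own
 * address, so the table needs no relocations at load time:
 *
 *	struct exception_table_entry {
 *		int insn, fixup, handler;
 *	};
 *
 *	# roughly what the _ASM_EXTABLE* macros emit per faulting insn:
 *	.pushsection "__ex_table", "a"
 *	.balign 4
 *	.long faulting_insn  - .	# -> insn
 *	.long fixup_label    - .	# -> fixup
 *	.long chosen_handler - .	# -> handler: ex_handler_default for
 *					#    _ASM_EXTABLE, ex_handler_fault for
 *					#    _ASM_EXTABLE_FAULT, ex_handler_ext
 *					#    for _ASM_EXTABLE_EX
 *	.popsection
 */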