// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PARISC Architecture-dependent parts of process handling
 * based on the work for i386
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
 * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
 * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
 * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
 * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 */

#include <stdarg.h>

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/pgalloc.h>
#include <asm/unwind.h>
#include <asm/sections.h>

#define COMMAND_GLOBAL	F_EXTEND(0xfffe0030)
#define CMD_RESET	5	/* reset any module */

/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) that may cause a broadcast reset to lock up
** the system. An HVERSION-dependent PDC call was developed
** to perform a "safe", platform-specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
	/*
	** If the user has modified the Firmware Selftest Bitmap,
	** run the tests specified in the bitmap after the
	** system is rebooted w/PDC_DO_RESET.
	**
	** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
	**
	** Using "directed resets" at each processor with the MEM_TOC
	** vector cleared will also avoid running destructive
	** memory self tests. (Not implemented yet)
	*/
	if (ftc_bitmap) {
		pdc_do_firm_test_reset(ftc_bitmap);
	}
#endif
	/* set up a new led state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* "Normal" system reset */
	pdc_do_reset();

	/* Nope...box should reset with just CMD_RESET now */
	gsc_writel(CMD_RESET, COMMAND_GLOBAL);

	/* Wait for RESET to lay us to rest. */
	while (1) ;

}

void (*chassis_power_off)(void);

/*
 * This routine is called from sys_reboot to actually turn off the
 * machine
 */
void machine_power_off(void)
{
	/* If there is a registered power off handler, call it. */
	if (chassis_power_off)
		chassis_power_off();

	/* Put the soft power button back under hardware control.
	 * If the user had already pressed the power button, the
	 * following call will immediately power off. */
	pdc_soft_power_button(0);

	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* ipmi_poweroff may have been installed. */
	if (pm_power_off)
		pm_power_off();

	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk(KERN_EMERG "System shut down completed.\n"
	       "Please power this system off now.");

	/* prevent soft lockup/stalled CPU messages for endless loop. */
	rcu_sysrq_start();
	lockup_detector_soft_poweroff();
	for (;;);
}

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void machine_halt(void)
{
	machine_power_off();
}

void flush_thread(void)
{
	/* Only needs to handle fpu stuff or perf monitors.
	** REVISIT: several arches implement a "lazy fpu state".
	*/
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * Fill in the FPU structure for a core dump.
 */

int dump_fpu (struct pt_regs *regs, elf_fpregset_t *r)
{
	if (regs == NULL)
		return 0;

	memcpy(r, regs->fr, sizeof *r);
	return 1;
}

int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
	memcpy(r, tsk->thread.regs.fr, sizeof(*r));
	return 1;
}

/*
 * Idle thread support
 *
 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
 * QEMU idle the host too.
 */

int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);

void __cpuidle arch_cpu_idle_dead(void)
{
	/* nop on real hardware, qemu will offline CPU. */
	asm volatile("or %%r31,%%r31,%%r31\n":::);
}

void __cpuidle arch_cpu_idle(void)
{
	local_irq_enable();

	/* nop on real hardware, qemu will idle sleep. */
	asm volatile("or %%r10,%%r10,%%r10\n":::);
}

static int __init parisc_idle_init(void)
{
	if (!running_on_qemu)
		cpu_idle_poll_ctrl(1);

	return 0;
}
arch_initcall(parisc_idle_init);

/*
 * Copy architecture-specific thread state
 */
int
copy_thread_tls(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *cregs = &(p->thread.regs);
	void *stack = task_stack_page(p);

	/* We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
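	 * (On 64-bit parisc a function pointer refers to an ELF function
	 * descriptor rather than to the code itself; see
	 * dereference_function_descriptor() below.)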
	 * Make them const so the compiler knows they live in .text */
	extern void * const ret_from_kernel_thread;
	extern void * const child_return;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(cregs, 0, sizeof(struct pt_regs));
		if (!usp) /* idle thread */
			return 0;
		/* Must exit via ret_from_kernel_thread in order
		 * to call schedule_tail()
		 */
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
		/*
		 * Copy function and argument to be called from
		 * ret_from_kernel_thread.
		 */
#ifdef CONFIG_64BIT
		cregs->gr[27] = ((unsigned long *)usp)[3];
		cregs->gr[26] = ((unsigned long *)usp)[2];
#else
		cregs->gr[26] = usp;
#endif
		cregs->gr[25] = kthread_arg;
	} else {
		/* user thread */
		/* usp must be word aligned. This also prevents users from
		 * passing in the value 1 (which is the signal for a special
		 * return for a kernel thread) */
		if (usp) {
			usp = ALIGN(usp, 4);
			if (likely(usp))
				cregs->gr[30] = usp;
		}
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &child_return;

		/* Setup thread TLS area */
		if (clone_flags & CLONE_SETTLS)
			cregs->cr27 = tls;
	}

	return 0;
}

unsigned long
get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * These bracket the sleeping functions..
	 */

	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < MAX_UNWIND_ENTRIES);
	return 0;
}

#ifdef CONFIG_64BIT
void *dereference_function_descriptor(void *ptr)
{
	Elf64_Fdesc *desc = ptr;
	void *p;

	if (!probe_kernel_address(&desc->addr, p))
		ptr = p;
	return ptr;
}

void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd ||
			ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;
	return ret;
}