/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}
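/*
 * Illustrative sketch (not kernel code): the window the preempt_disable()
 * section above closes. Without flushing live FPU/MSA/DSP state into
 * 'current' first, this interleaving would hand the child stale FP state:
 *
 *	parent: *dst = *src;		<- child inherits last-saved FP image
 *	parent: <preempted>		<- live FPU state lands in parent only
 *	parent: copy_thread(...);	<- nothing re-saves it into the child
 *
 * Saving first makes the structure assignment a complete snapshot.
 */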
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p,
		unsigned long tls)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) |
			       ST0_KERNEL_CUMASK;
	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}
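/*
 * Illustrative sketch (assumed-typical compiler output, not taken from any
 * particular binary): the classic MIPS prologue shape the helpers below
 * pattern-match when get_frame_info() scans a function:
 *
 *	addiu	sp, sp, -32	# is_sp_move_ins() -> frame_size = 32
 *	sw	ra, 28(sp)	# is_ra_save_ins() -> pc_offset = 28/4 = 7
 *	...			#   (word index, on a 32-bit kernel)
 *	jr	ra		# is_jr_ra_ins()   -> end of function
 *
 * Leaf functions never store $ra, which is why a scan that finds a frame
 * but no save slot reports "leaf" rather than an error.
 */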
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
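/*
 * Worked example (illustrative, simply tracing the ADDIUSP decode in
 * is_sp_move_ins() below): a 9-bit encoded field of 0x1f0 is sign-extended
 * by "((tmp & 0x1ff) ^ 0x100) - 0x100" to -16 (0xfff0 in the unsigned
 * short), is not one of the special encodings 0x0/0x1/0x1fe/0x1ff, and
 * "-(signed short)(tmp << 2)" then yields frame_size = 64, i.e. the
 * instruction grew the stack by 64 bytes.
 */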
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
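/*
 * Minimal caller sketch mirroring how frame_info_init() and the unwinder
 * below consume get_frame_info()'s contract: 0 = nested (frame and $ra slot
 * found), 1 = leaf (no $ra slot), -1 = prologue not understood. 'my_func'
 * and 'saved_ra_of' are hypothetical names, and the block is illustrative
 * only, hence not built.
 */
#if 0	/* illustrative only */
static unsigned long saved_ra_of(unsigned long *sp_of_my_func)
{
	struct mips_frame_info info = {
		.func		= (void *)my_func,
		.func_size	= 0,	/* scan up to the 512-byte default */
	};

	if (get_frame_info(&info))
		return 0;	/* leaf or bogus prologue: no stack slot */
	return sp_of_my_func[info.pc_offset];
}
#endif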
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
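/*
 * Illustrative reading of the lookup above (no additional kernel state
 * involved): for a blocked task, thread.reg29 is the kernel $sp saved at
 * the last context switch, i.e. it points into __schedule()'s stack frame,
 * so
 *
 *	((unsigned long *)t->reg29)[schedule_mfi.pc_offset]
 *
 * is the stack slot where __schedule()'s prologue stored $ra - the return
 * address used as the starting PC for get_wchan().
 */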
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points
		 * to something within the stack of the current task.
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In such
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}
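/*
 * Illustrative layout of the carve-outs above (which regions exist depends
 * on the configuration and CPU features):
 *
 *	TASK_SIZE ------------------------------+
 *	  delay slot emulation page (FP only)   |
 *	  VDSO image + data page + GIC page     |
 *	  cache colour padding (if aliasing)    |
 *	  VDSO randomization gap (if enabled)   |
 *	mips_stack_top() -----------------------+ <- stack grows down from here
 */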
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}

static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}
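/*
 * Userspace usage sketch (illustrative): the getter above and the setter
 * below back the PR_GET_FP_MODE/PR_SET_FP_MODE prctl()s, e.g.
 *
 *	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);
 *	if (prctl(PR_SET_FP_MODE, mode | PR_FP_MODE_FR, 0, 0, 0))
 *		perror("PR_SET_FP_MODE");
 *
 * i.e. an o32 process asking to switch its FP registers to 64-bit (FR=1)
 * mode.
 */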
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	get_online_cpus();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	put_online_cpus();

	return 0;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */