/*
 * arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE
#endif

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
						/* fault info	  */
	unsigned long		address;
	unsigned long		trap_no;
	unsigned long		error_code;
						/* debugging	  */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = *size = 0;
}

#define INIT_THREAD  { }

#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax()						\
	do {							\
		smp_mb();					\
		__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
	} while (0)
#else
#define cpu_relax()			barrier()
#endif

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp

#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.long	9998b\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif

/*
 * Prefetching support - only ARMv5.
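 *
 * prefetch() emits a PLD hint to pull a cache line in for reading.
 * On ARMv7 SMP kernels, prefetchw() emits PLDW (preload for write);
 * __ALT_SMP_ASM() records the instruction in .alt.smp.init so it can
 * be patched back to a plain PLD when booting on a uniprocessor system.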
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension	mp\n"
		__ALT_SMP_ASM(
			WASM(pldw)		"\t%a0",
			WASM(pld)		"\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif

#endif /* __ASM_ARM_PROCESSOR_H */