/*
 * arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 *
 * Implemented with a GCC local label: taking the address of a label
 * placed at the expansion point (&&_l) yields the current text address.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

/*
 * Top of the user stack.  Tasks running with the 26-bit personality
 * (ADDR_LIMIT_32BIT clear) get the smaller 26-bit task size.
 *
 * Note: an inner "#ifdef __KERNEL__" guard that used to wrap these two
 * definitions was redundant — we are already inside the outer
 * __KERNEL__ section above — and has been removed.
 */
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
			 TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX	TASK_SIZE

/* Per-thread hardware-breakpoint state; empty when HW breakpoints are
 * not configured. */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
	/* fault info */
	unsigned long		address;	/* faulting address */
	unsigned long		trap_no;	/* trap number */
	unsigned long		error_code;	/* fault status/error code */
	/* debugging */
	struct debug_info	debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Zero-length whitelist: no dynamically-sized usercopy region. */
	*offset = *size = 0;
}

#define INIT_THREAD  { }

/*
 * Initialize the user register state for a newly exec'd thread.
 *
 * For ELF FDPIC binaries r7-r9 are saved before the register wipe and
 * restored afterwards (they carry FDPIC setup state handed over by the
 * binfmt loader — see binfmt_elf_fdpic), and r10 is loaded with the
 * data segment base.  The CPSR is set for 26- or 32-bit user mode per
 * the task's personality, with the Thumb bit set when the entry point
 * is odd and the CPU supports Thumb.
 */
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * ARMv6 CPUs and parts affected by erratum 754327 need a full memory
 * barrier in cpu_relax() (see the erratum description); otherwise a
 * compiler barrier suffices.
 */
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax()	smp_mb()
#else
#define cpu_relax()	barrier()
#endif

/* User-mode register frame sits at the top of the kernel stack. */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp

#ifdef CONFIG_SMP
/*
 * Emit the SMP form of an instruction and record its address in the
 * .alt.smp.init section so it can be patched to the UP form when the
 * kernel boots on a uniprocessor machine.
 */
#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif

/*
 * Prefetching support - ARMv5 and later (PLD; write prefetch via PLDW
 * needs the ARMv7 multiprocessing extensions).
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	/* PLDW on SMP, runtime-patched back to PLD for UP boots. */
	__asm__ __volatile__(
		".arch_extension mp\n"
		__ALT_SMP_ASM(
			WASM(pldw) "\t%a0",
			WASM(pld) "\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif

#endif /* __ASM_ARM_PROCESSOR_H */