/*
 * Process/processor support for the Hexagon architecture
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#ifndef __ASSEMBLY__

#include <asm/mem-layout.h>
#include <asm/registers.h>
#include <asm/hexagon_vm.h>

/* must be a macro */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })

/* task_struct, defined elsewhere, is the "process descriptor" */
struct task_struct;

extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

/*
 * thread_struct is supposed to be for context switch data.
 * Specifically, it holds the state necessary to perform switch_to...
 */
struct thread_struct {
	void *switch_sp;
};

/*
 * Initializes thread_struct.
 * The only thing we have in there is switch_sp,
 * which doesn't really need to be initialized.
 */
#define INIT_THREAD { \
}

#define cpu_relax() __vmyield()

/*
 * Decides where the kernel will search for a free chunk of vm space during
 * mmaps.
 * See also arch_get_unmapped_area.
 * Doesn't apply if MAP_FIXED is set in the mmap flags, though...
 *
 * Apparently the convention is that ld.so will ask for "unmapped" private
 * memory to be allocated SOMEWHERE, but it also asks for memory explicitly
 * via MAP_FIXED at the lower addresses starting at VA=0x0.
 *
 * If the two requests collide, you get authentic segfaulting action, so
 * you have to kick the "unmapped" base requests higher up.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE/3))

#define task_pt_regs(task) \
	((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)

#define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
#define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))

/* Free all resources held by a thread; defined in process.c */
extern void release_thread(struct task_struct *dead_task);

/* Get wait channel for task P. */
extern unsigned long get_wchan(struct task_struct *p);
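/*
 * Example (illustrative only): task_pt_regs() relies on the convention
 * that the saved user-mode pt_regs frame sits at the very top of the
 * task's kernel stack, so KSTK_EIP()/KSTK_ESP() are simply field reads
 * from that frame via the pt_elr()/pt_psp() accessors, e.g.:
 *
 *	struct pt_regs *regs = task_pt_regs(tsk);
 *	unsigned long pc = pt_elr(regs);	(saved user program counter)
 *	unsigned long sp = pt_psp(regs);	(saved user stack pointer)
 */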
/* The following stuff is pretty HEXAGON specific. */

/*
 * This is really just here for __switch_to.
 * Offsets are pulled via asm-offsets.c.
 */

/*
 * No real reason why VM and native switch stacks should be different.
 * Ultimately this should merge.  Note that Rev C. ABI called out only
 * R24-27 as callee saved GPRs needing explicit attention (R29-31 being
 * dealt with automagically by allocframe), but the current ABI has
 * more, R16-R27.  By saving more, the worst case is that we waste some
 * cycles if building with the old compilers.
 */
struct hexagon_switch_stack {
	union {
		struct {
			unsigned long r16;
			unsigned long r17;
		};
		unsigned long long r1716;
	};
	union {
		struct {
			unsigned long r18;
			unsigned long r19;
		};
		unsigned long long r1918;
	};
	union {
		struct {
			unsigned long r20;
			unsigned long r21;
		};
		unsigned long long r2120;
	};
	union {
		struct {
			unsigned long r22;
			unsigned long r23;
		};
		unsigned long long r2322;
	};
	union {
		struct {
			unsigned long r24;
			unsigned long r25;
		};
		unsigned long long r2524;
	};
	union {
		struct {
			unsigned long r26;
			unsigned long r27;
		};
		unsigned long long r2726;
	};

	unsigned long fp;
	unsigned long lr;
};

#endif /* !__ASSEMBLY__ */

#endif
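/*
 * Sketch (illustrative only, not a definitive description of the
 * implementation): as the comments above suggest, __switch_to is expected
 * to push a hexagon_switch_stack frame on the outgoing task's kernel stack
 * and record its address in thread.switch_sp.  The register-pair unions
 * let that save/restore be expressed as 64-bit accesses, conceptually:
 *
 *	struct hexagon_switch_stack *ss = prev->thread.switch_sp;
 *	ss->r1716 holds the R17:16 pair, ss->r1918 the R19:18 pair, and so
 *	on through r2726, with fp and lr handled by allocframe.
 */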