/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_STACK_H
#define _LINUX_SCHED_TASK_STACK_H

/*
 * task->stack (kernel stack) handling interfaces:
 */

#include <linux/sched.h>
#include <linux/magic.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead. task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

#define setup_thread_stack(new,old)	do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_stack_page(task)	((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the thread_info.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * Pin the stack so it cannot be freed while we look at it; returns NULL
 * if the task has already released its stack.
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return refcount_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
/*
 * Without CONFIG_THREAD_INFO_IN_TASK the stack lives and dies with the
 * task_struct itself, so no pinning is needed.
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif

/* True if the STACK_END_MAGIC canary at end_of_stack() has been overwritten. */
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

/* Does obj point into current's kernel stack? */
static inline int object_is_on_stack(const void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
/* Number of unused stack bytes: the run of still-zero words past the canary. */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/*
	 * Reliable end of stack detection:
	 * Some APM BIOS versions misalign the stack.
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

#endif /* _LINUX_SCHED_TASK_STACK_H */
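
/*
 * Example: the try_get_task_stack()/put_task_stack() pairing that the
 * comment above task_stack_page() asks for when inspecting a task that
 * might exit underneath us. This is an illustrative sketch, not part of
 * the kernel API; example_inspect_remote_stack() is a hypothetical name.
 */
#include <linux/printk.h>

static inline void example_inspect_remote_stack(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);	/* pins the stack, or NULL */

	if (!stack)
		return;		/* task exited; its stack is already gone */

	pr_info("%s/%d stack spans %px..%px\n",
		tsk->comm, tsk->pid, stack, stack + THREAD_SIZE);

	put_task_stack(tsk);	/* drop the pin so the stack can be freed */
}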
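
/*
 * Example: how the end-of-stack canary pieces compose. kernel/fork.c calls
 * set_task_stack_end_magic() when a task is created, writing STACK_END_MAGIC
 * at end_of_stack(); task_stack_end_corrupted() later detects an overflow
 * that plowed through that word. Illustrative sketch; the helper name is
 * made up.
 */
static inline void example_warn_on_stack_overrun(struct task_struct *tsk)
{
	if (task_stack_end_corrupted(tsk))
		pr_emerg("stack overrun in %s/%d\n", tsk->comm, tsk->pid);
}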
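
/*
 * Example: object_is_on_stack() as a guard against handing out memory that
 * vanishes when the current frame unwinds, e.g. the classic DMA-from-stack
 * bug. Illustrative sketch; the helper is hypothetical.
 */
static inline int example_safe_for_dma(const void *buf)
{
	/* Stack memory is gone once the caller returns; reject it for DMA. */
	return !object_is_on_stack(buf);
}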
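
/*
 * Example: reporting how much stack a task never touched, in the spirit of
 * check_stack_usage() in kernel/exit.c, which uses stack_not_used() to
 * track the worst-case stack depth of exiting tasks. Sketch only; the
 * helper name is invented.
 */
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline void example_report_stack_headroom(struct task_struct *tsk)
{
	pr_info("%s/%d never used %lu bytes of its stack\n",
		tsk->comm, tsk->pid, stack_not_used(tsk));
}
#endif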
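
/*
 * Example: kstack_end() as the loop terminator when walking raw stack
 * words, in the style of some arch show_stack() implementations. Sketch;
 * the name is hypothetical.
 */
static inline void example_dump_stack_words(unsigned long *sp)
{
	while (!kstack_end(sp))
		pr_cont(" %016lx", *sp++);	/* stop at the stack's end */
	pr_cont("\n");
}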