xref: /openbmc/linux/include/linux/sched/task_stack.h (revision 00ef0ef2)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
268db0cf1SIngo Molnar #ifndef _LINUX_SCHED_TASK_STACK_H
368db0cf1SIngo Molnar #define _LINUX_SCHED_TASK_STACK_H
468db0cf1SIngo Molnar 
5f3ac6067SIngo Molnar /*
6f3ac6067SIngo Molnar  * task->stack (kernel stack) handling interfaces:
7f3ac6067SIngo Molnar  */
8f3ac6067SIngo Molnar 
968db0cf1SIngo Molnar #include <linux/sched.h>
1050d34394SIngo Molnar #include <linux/magic.h>
1168db0cf1SIngo Molnar 
12f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
13f3ac6067SIngo Molnar 
14f3ac6067SIngo Molnar /*
15f3ac6067SIngo Molnar  * When accessing the stack of a non-current task that might exit, use
16f3ac6067SIngo Molnar  * try_get_task_stack() instead.  task_stack_page will return a pointer
17f3ac6067SIngo Molnar  * that could get freed out from under you.
18f3ac6067SIngo Molnar  */
static inline void *task_stack_page(const struct task_struct *task)
{
	/* With THREAD_INFO_IN_TASK, task->stack points directly at the stack area. */
	return task->stack;
}
23f3ac6067SIngo Molnar 
24f3ac6067SIngo Molnar #define setup_thread_stack(new,old)	do { } while(0)
25f3ac6067SIngo Molnar 
static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	/*
	 * The stack-end canary (STACK_END_MAGIC) lives at this location --
	 * see task_stack_end_corrupted() below.
	 */
	return task->stack;
}
30f3ac6067SIngo Molnar 
31f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS)
32f3ac6067SIngo Molnar 
33f3ac6067SIngo Molnar #define task_stack_page(task)	((void *)(task)->stack)
34f3ac6067SIngo Molnar 
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	/* Copy the originating task's thread_info onto the new stack ... */
	*task_thread_info(p) = *task_thread_info(org);
	/* ... then repair the back-pointer so it refers to the new task. */
	task_thread_info(p)->task = p;
}
40f3ac6067SIngo Molnar 
41f3ac6067SIngo Molnar /*
42f3ac6067SIngo Molnar  * Return the address of the last usable long on the stack.
43f3ac6067SIngo Molnar  *
44f3ac6067SIngo Molnar  * When the stack grows down, this is just above the thread
45f3ac6067SIngo Molnar  * info struct. Going any lower will corrupt the threadinfo.
46f3ac6067SIngo Molnar  *
47f3ac6067SIngo Molnar  * When the stack grows up, this is the highest address.
48f3ac6067SIngo Molnar  * Beyond that position, we corrupt data on the next page.
49f3ac6067SIngo Molnar  */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	/* Growing up: the last usable long is the top word of the stack area. */
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	/* Growing down: the last usable long sits just above the thread_info. */
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}
58f3ac6067SIngo Molnar 
59f3ac6067SIngo Molnar #endif
60f3ac6067SIngo Molnar 
61f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	/*
	 * Take a reference on the stack so it cannot be freed out from
	 * under the caller; returns NULL if the refcount already hit zero
	 * (stack is gone or going away).  Pair with put_task_stack().
	 */
	return atomic_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}
67f3ac6067SIngo Molnar 
68f3ac6067SIngo Molnar extern void put_task_stack(struct task_struct *tsk);
69f3ac6067SIngo Molnar #else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	/* No stack refcounting in this configuration; just return the page. */
	return task_stack_page(tsk);
}
74f3ac6067SIngo Molnar 
75f3ac6067SIngo Molnar static inline void put_task_stack(struct task_struct *tsk) {}
76f3ac6067SIngo Molnar #endif
77f3ac6067SIngo Molnar 
/* True if the stack-end canary has been overwritten (likely stack overrun). */
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)
80f3ac6067SIngo Molnar 
8100ef0ef2SSascha Hauer static inline int object_is_on_stack(const void *obj)
82f3ac6067SIngo Molnar {
83f3ac6067SIngo Molnar 	void *stack = task_stack_page(current);
84f3ac6067SIngo Molnar 
85f3ac6067SIngo Molnar 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
86f3ac6067SIngo Molnar }
87f3ac6067SIngo Molnar 
88f3ac6067SIngo Molnar extern void thread_stack_cache_init(void);
89f3ac6067SIngo Molnar 
90f3ac6067SIngo Molnar #ifdef CONFIG_DEBUG_STACK_USAGE
/*
 * Walk inward from end_of_stack() to find the first word that was ever
 * written, and return the number of untouched bytes.  Assumes the
 * unused portion of the stack is still zero -- presumably zeroed at
 * allocation time; TODO confirm for this configuration.
 */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

	/* Distance from the stack end to the first non-zero word. */
# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
109f3ac6067SIngo Molnar #endif
110f3ac6067SIngo Molnar extern void set_task_stack_end_magic(struct task_struct *tsk);
111f3ac6067SIngo Molnar 
1129049863aSIngo Molnar #ifndef __HAVE_ARCH_KSTACK_END
1139049863aSIngo Molnar static inline int kstack_end(void *addr)
1149049863aSIngo Molnar {
1159049863aSIngo Molnar 	/* Reliable end of stack detection:
1169049863aSIngo Molnar 	 * Some APM bios versions misalign the stack
1179049863aSIngo Molnar 	 */
1189049863aSIngo Molnar 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1199049863aSIngo Molnar }
1209049863aSIngo Molnar #endif
1219049863aSIngo Molnar 
12268db0cf1SIngo Molnar #endif /* _LINUX_SCHED_TASK_STACK_H */
123