xref: /openbmc/linux/include/linux/sched/task_stack.h (revision 9cc2fa4f)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
268db0cf1SIngo Molnar #ifndef _LINUX_SCHED_TASK_STACK_H
368db0cf1SIngo Molnar #define _LINUX_SCHED_TASK_STACK_H
468db0cf1SIngo Molnar 
5f3ac6067SIngo Molnar /*
6f3ac6067SIngo Molnar  * task->stack (kernel stack) handling interfaces:
7f3ac6067SIngo Molnar  */
8f3ac6067SIngo Molnar 
968db0cf1SIngo Molnar #include <linux/sched.h>
1050d34394SIngo Molnar #include <linux/magic.h>
1168db0cf1SIngo Molnar 
12f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
13f3ac6067SIngo Molnar 
14f3ac6067SIngo Molnar /*
15f3ac6067SIngo Molnar  * When accessing the stack of a non-current task that might exit, use
16f3ac6067SIngo Molnar  * try_get_task_stack() instead.  task_stack_page will return a pointer
17f3ac6067SIngo Molnar  * that could get freed out from under you.
18f3ac6067SIngo Molnar  */
/*
 * Return the base address of @task's kernel stack.  With
 * CONFIG_THREAD_INFO_IN_TASK the stack pointer is stored directly in
 * task_struct, so no thread_info indirection is involved.
 *
 * The returned region may be freed once the task exits — for a
 * non-current task, pin it with try_get_task_stack() first (see the
 * comment above).
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}
23f3ac6067SIngo Molnar 
/*
 * No-op here: with CONFIG_THREAD_INFO_IN_TASK there is no thread_info
 * at the base of the stack that would need copying from parent to child.
 */
#define setup_thread_stack(new,old)	do { } while(0)
25f3ac6067SIngo Molnar 
26f3ac6067SIngo Molnar static inline unsigned long *end_of_stack(const struct task_struct *task)
27f3ac6067SIngo Molnar {
28*9cc2fa4fSHelge Deller #ifdef CONFIG_STACK_GROWSUP
29*9cc2fa4fSHelge Deller 	return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
30*9cc2fa4fSHelge Deller #else
31f3ac6067SIngo Molnar 	return task->stack;
32*9cc2fa4fSHelge Deller #endif
33f3ac6067SIngo Molnar }
34f3ac6067SIngo Molnar 
35f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS)
36f3ac6067SIngo Molnar 
/* Without THREAD_INFO_IN_TASK, ->stack still points at the stack area. */
#define task_stack_page(task)	((void *)(task)->stack)
38f3ac6067SIngo Molnar 
39f3ac6067SIngo Molnar static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
40f3ac6067SIngo Molnar {
41f3ac6067SIngo Molnar 	*task_thread_info(p) = *task_thread_info(org);
42f3ac6067SIngo Molnar 	task_thread_info(p)->task = p;
43f3ac6067SIngo Molnar }
44f3ac6067SIngo Molnar 
45f3ac6067SIngo Molnar /*
46f3ac6067SIngo Molnar  * Return the address of the last usable long on the stack.
47f3ac6067SIngo Molnar  *
48f3ac6067SIngo Molnar  * When the stack grows down, this is just above the thread
49f3ac6067SIngo Molnar  * info struct. Going any lower will corrupt the threadinfo.
50f3ac6067SIngo Molnar  *
51f3ac6067SIngo Molnar  * When the stack grows up, this is the highest address.
52f3ac6067SIngo Molnar  * Beyond that position, we corrupt data on the next page.
53f3ac6067SIngo Molnar  */
54f3ac6067SIngo Molnar static inline unsigned long *end_of_stack(struct task_struct *p)
55f3ac6067SIngo Molnar {
56f3ac6067SIngo Molnar #ifdef CONFIG_STACK_GROWSUP
57f3ac6067SIngo Molnar 	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
58f3ac6067SIngo Molnar #else
59f3ac6067SIngo Molnar 	return (unsigned long *)(task_thread_info(p) + 1);
60f3ac6067SIngo Molnar #endif
61f3ac6067SIngo Molnar }
62f3ac6067SIngo Molnar 
63f3ac6067SIngo Molnar #endif
64f3ac6067SIngo Molnar 
65f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
66f3ac6067SIngo Molnar static inline void *try_get_task_stack(struct task_struct *tsk)
67f3ac6067SIngo Molnar {
68f0b89d39SElena Reshetova 	return refcount_inc_not_zero(&tsk->stack_refcount) ?
69f3ac6067SIngo Molnar 		task_stack_page(tsk) : NULL;
70f3ac6067SIngo Molnar }
71f3ac6067SIngo Molnar 
/* Drop a stack reference taken by a successful try_get_task_stack(). */
extern void put_task_stack(struct task_struct *tsk);
73f3ac6067SIngo Molnar #else
/*
 * Without THREAD_INFO_IN_TASK the stack's lifetime matches the task
 * itself, so no reference counting is needed; just return the page.
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}
78f3ac6067SIngo Molnar 
/* No-op counterpart: nothing to release when the stack is not refcounted. */
static inline void put_task_stack(struct task_struct *tsk) {}
80f3ac6067SIngo Molnar #endif
81f3ac6067SIngo Molnar 
/*
 * True if the stack-end canary word (written as STACK_END_MAGIC by
 * set_task_stack_end_magic()) has been overwritten — i.e. the stack
 * overflowed into it.
 */
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)
84f3ac6067SIngo Molnar 
8500ef0ef2SSascha Hauer static inline int object_is_on_stack(const void *obj)
86f3ac6067SIngo Molnar {
87f3ac6067SIngo Molnar 	void *stack = task_stack_page(current);
88f3ac6067SIngo Molnar 
89f3ac6067SIngo Molnar 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
90f3ac6067SIngo Molnar }
91f3ac6067SIngo Molnar 
92f3ac6067SIngo Molnar extern void thread_stack_cache_init(void);
93f3ac6067SIngo Molnar 
94f3ac6067SIngo Molnar #ifdef CONFIG_DEBUG_STACK_USAGE
/*
 * Estimate how many bytes of @p's stack were never used, by scanning
 * from end_of_stack() — past the canary word — toward the in-use region
 * until the first non-zero long.  Assumes the never-touched portion of
 * the stack is still zero-filled from allocation.
 */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;	/* stack grows up: scan downward from the top end */
# else
		n++;	/* stack grows down: scan upward from the bottom end */
# endif
	} while (!*n);

	/* Distance from the stack end to the first used (non-zero) word. */
# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
113f3ac6067SIngo Molnar #endif
114f3ac6067SIngo Molnar extern void set_task_stack_end_magic(struct task_struct *tsk);
115f3ac6067SIngo Molnar 
1169049863aSIngo Molnar #ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	/*
	 * True when @addr has reached the THREAD_SIZE-aligned end of the
	 * stack area, tolerating up to sizeof(void *)-1 bytes of
	 * misalignment: round @addr up to a pointer-size boundary, then
	 * test whether it sits exactly on a THREAD_SIZE boundary.
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
1249049863aSIngo Molnar #endif
1259049863aSIngo Molnar 
12668db0cf1SIngo Molnar #endif /* _LINUX_SCHED_TASK_STACK_H */
127