/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PROCESSOR_H
#define _ASM_RISCV_PROCESSOR_H

#include <linux/const.h>
#include <linux/cache.h>

#include <vdso/processor.h>

#include <asm/ptrace.h>

#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW	(UL(1) << (MMAP_VA_BITS - 1))
#define STACK_TOP_MAX		TASK_SIZE

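/*
 * Note (added commentary, derived from the macro below): the mmap()
 * address-space ceiling is picked from the hint address.  A zero hint,
 * or a compat task, gets the full default window; a hint at or above
 * VA_USER_SV57 may use the whole space; otherwise the ceiling is
 * clamped to the Sv48 or Sv39 user range, depending on what VA_BITS
 * supports.
 */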
#define arch_get_mmap_end(addr, len, flags)			\
({								\
	unsigned long mmap_end;					\
	typeof(addr) _addr = (addr);				\
	if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
		mmap_end = STACK_TOP_MAX;			\
	else if ((_addr) >= VA_USER_SV57)			\
		mmap_end = STACK_TOP_MAX;			\
	else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
		mmap_end = VA_USER_SV48;			\
	else							\
		mmap_end = VA_USER_SV39;			\
	mmap_end;						\
})

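/*
 * Note (added commentary, derived from the macro below): rnd_gap is the
 * ASLR gap between DEFAULT_MAP_WINDOW and the randomized base passed in.
 * When the hint selects a larger (Sv48/Sv57) address space, the same gap
 * is re-applied below the new ceiling so the randomization offset is kept.
 */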
#define arch_get_mmap_base(addr, base)				\
({								\
	unsigned long mmap_base;				\
	typeof(addr) _addr = (addr);				\
	typeof(base) _base = (base);				\
	unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base);	\
	if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
		mmap_base = (_base);				\
	else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
		mmap_base = VA_USER_SV57 - rnd_gap;		\
	else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
		mmap_base = VA_USER_SV48 - rnd_gap;		\
	else							\
		mmap_base = VA_USER_SV39 - rnd_gap;		\
	mmap_base;						\
})

#else
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE
#endif
#define STACK_ALIGN		16

#define STACK_TOP		DEFAULT_MAP_WINDOW

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
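/*
 * Note (added commentary, derived from the definitions below): on 64-bit
 * the base works out to roughly one third of the minimum supported user VA
 * span (MMAP_MIN_VA_BITS), so it remains valid even when the hardware only
 * implements the smallest paging mode.
 */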
#ifdef CONFIG_64BIT
#define TASK_UNMAPPED_BASE	PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3)
#else
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
#endif

#ifndef __ASSEMBLY__

struct task_struct;
struct pt_regs;

/* CPU-specific state of a task */
struct thread_struct {
	/* Callee-saved registers */
	unsigned long ra;
	unsigned long sp;	/* Kernel mode stack */
	unsigned long s[12];	/* s[0]: frame pointer */
	struct __riscv_d_ext_state fstate;
	unsigned long bad_cause;
	unsigned long vstate_ctrl;
	struct __riscv_v_ext_state vstate;
};

/* Whitelist the fstate from the task_struct for hardened usercopy */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fstate);
	*size = sizeof_field(struct thread_struct, fstate);
}

#define INIT_THREAD {					\
	.sp	= sizeof(init_stack) + (long)&init_stack, \
}

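/*
 * Note (added commentary, derived from the macro below): the saved user
 * register frame (struct pt_regs) sits at the very top of the task's
 * kernel stack, aligned down to STACK_ALIGN.
 */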
#define task_pt_regs(tsk)						\
	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE		\
			    - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->epc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)


/* Do necessary setup to start up a newly executed thread. */
extern void start_thread(struct pt_regs *regs,
			unsigned long pc, unsigned long sp);

extern unsigned long __get_wchan(struct task_struct *p);

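/* Note (added commentary): WFI stalls the hart until an interrupt is pending. */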
static inline void wait_for_interrupt(void)
{
	__asm__ __volatile__ ("wfi");
}

struct device_node;
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);

extern void riscv_fill_hwcap(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);

extern unsigned long signal_minsigstksz __ro_after_init;

#ifdef CONFIG_RISCV_ISA_V
/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */
#define RISCV_V_SET_CONTROL(arg)	riscv_v_vstate_ctrl_set_current(arg)
#define RISCV_V_GET_CONTROL()		riscv_v_vstate_ctrl_get_current()
extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
extern long riscv_v_vstate_ctrl_get_current(void);
#endif /* CONFIG_RISCV_ISA_V */
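/*
 * Usage sketch (added commentary, userspace side), assuming the
 * PR_RISCV_V_* constants from <linux/prctl.h> are available:
 *
 *	prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
 *	long ctrl = prctl(PR_RISCV_V_GET_CONTROL);
 */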

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_PROCESSOR_H */