/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory, so no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
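
/*
 * Illustrative sketch (not part of this header; names are hypothetical):
 * how the contract above plays out for a caller.  If raw_copy_from_user()
 * returns N, exactly size - N bytes at the start of the destination are
 * valid:
 *
 *	unsigned long left = raw_copy_from_user(kbuf, ubuf, size);
 *	size_t copied = size - left;	(first 'copied' bytes of kbuf valid)
 *	if (left)
 *		handle_short_copy(kbuf, copied);	(hypothetical helper)
 */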

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
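
/*
 * Illustrative sketch (not part of this header): the __ variants above skip
 * access_ok(), so the caller must perform that check itself and must check
 * the return value, since nothing is zero-padded on a short copy.  'ubuf',
 * 'kbuf' and 'len' are hypothetical:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */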

#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
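
/*
 * Illustrative sketch (not part of this header): an architecture that wants
 * the plain copy_{to,from}_user() wrappers inlined opts in from its
 * asm/uaccess.h:
 *
 *	#define INLINE_COPY_FROM_USER
 *	#define INLINE_COPY_TO_USER
 *
 * Otherwise the out-of-line _copy_{to,from}_user() definitions are used
 * (typically provided by lib/usercopy.c).
 */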

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
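
/*
 * Illustrative sketch (not part of this header): a typical caller treats any
 * nonzero return as -EFAULT.  copy_from_user() zero-pads the tail of the
 * kernel buffer on a short copy, so it never holds stale data.  'uarg' is
 * hypothetical:
 *
 *	struct foo karg;
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */
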
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif
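
/*
 * Illustrative sketch (not part of this header): copy_in_user() moves data
 * between two user-space buffers, e.g. in compat code that repacks a 32-bit
 * layout next to a 64-bit one in user memory; both pointers go through
 * access_ok() above.  Names are hypothetical:
 *
 *	if (copy_in_user(u64_uptr, u32_uptr, count))
 *		return -EFAULT;
 */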

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
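
/*
 * Illustrative sketch (not part of this header): the usual pairing.  With
 * page faults disabled only the *_inatomic copy variants may be used (after
 * access_ok()); a nonzero return means the page was not resident, and no
 * sleeping fault was taken.  Names are hypothetical:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kbuf, ubuf, len);
 *	pagefault_enable();
 *	if (left)
 *		(fall back to a sleeping copy outside the atomic section)
 */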

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler won't actually be
 * disabled, and in_atomic() will report different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
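
/*
 * Illustrative sketch (not part of this header): probing a possibly invalid
 * kernel pointer without risking an oops, e.g. from a debugging facility.
 * 'src' is hypothetical:
 *
 *	char buf[64];
 *	if (probe_kernel_read(buf, src, sizeof(buf)))
 *		return -EFAULT;	(the fault was handled, not fatal)
 */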

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))
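
/*
 * Illustrative sketch (not part of this header): note that @retval is the
 * variable itself, not a pointer to it; the macro takes its address and
 * size.  'pc' is hypothetical:
 *
 *	unsigned int insn;
 *	if (probe_kernel_address(pc, insn))
 *		return -EFAULT;
 */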

#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
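
/*
 * Illustrative sketch (not part of this header): the unsafe_*_user()
 * accessors branch to a label on fault and must be bracketed by
 * user_access_begin()/user_access_end(); the caller still does access_ok()
 * itself.  Names are hypothetical:
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	user_access_begin();
 *	unsafe_get_user(val, uptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */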

#endif		/* __LINUX_UACCESS_H__ */