#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <asm/uaccess.h>

/* Per-task nesting counter for pagefault_disable()/pagefault_enable(). */
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
	/* Catch unbalanced pagefault_enable() calls. */
	WARN_ON(current->pagefault_disabled < 0);
}

/*
 * These routines enable/disable the pagefault handler. If it is disabled, the
 * handler will not take any locks and will go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

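/*
 * Illustrative usage sketch (not part of this header): callers that must not
 * sleep bracket a user access with the pair above and fall back to a sleeping
 * copy if the access faults.  The names kbuf, uptr, len and left are
 * placeholders.
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kbuf, uptr, len);
 *	pagefault_enable();
 *	if (left)
 *		left = copy_from_user(kbuf, uptr, len);	(retry where sleeping is allowed)
 */
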
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (detected via in_atomic()).
 *
 * This macro should only be used by the fault handlers themselves; other
 * users should stick to pagefault_disabled().
 *
 * NEVER rely on preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler is not really
 * disabled, and in_atomic() reports different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

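/*
 * Sketch of the intended use in an architecture fault handler (illustrative
 * only; the surrounding logic and label names vary per architecture):
 *
 *	if (faulthandler_disabled() || !current->mm)
 *		goto no_context;	(must not sleep; do exception fixup)
 */
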
#ifndef ARCH_HAS_NOCACHE_UACCESS

/*
 * Generic fallbacks for architectures that provide no cache-bypassing user
 * copies: simply use the regular (cached) variants.
 */
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);

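/*
 * Usage sketch (illustrative, not part of this header): read a word through a
 * possibly invalid kernel pointer without risking an oops.  @ptr and val are
 * placeholder names.
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, ptr, sizeof(val)))
 *		return -EFAULT;
 */
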
/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

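/*
 * Usage sketch (illustrative): write to a kernel address that might be
 * invalid, getting -EFAULT back instead of an oops.  @dst and new_val are
 * placeholder names.
 *
 *	if (probe_kernel_write(dst, &new_val, sizeof(new_val)))
 *		return -EFAULT;
 */
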
/*
 * strncpy_from_unsafe(): copy a NUL-terminated string from a potentially
 * unsafe kernel address, copying at most @count bytes; a fault is handled
 * and reported as a negative return value instead of oopsing.
 */
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))

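/*
 * Usage sketch (illustrative): because this is a macro, @retval must be an
 * lvalue whose size determines how much is read.  @insn_addr and opcode are
 * placeholder names.
 *
 *	unsigned short opcode;
 *
 *	if (probe_kernel_address(insn_addr, opcode))
 *		return -EFAULT;
 */
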
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif

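/*
 * Usage sketch (illustrative, not part of this header): unsafe_get_user() and
 * unsafe_put_user() may only be used between user_access_begin() and
 * user_access_end(), after the range has been validated with access_ok().
 * They jump to the supplied label on a fault.  uptr, val and efault are
 * placeholder names.
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	user_access_begin();
 *	unsafe_get_user(val, uptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
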
#endif		/* __LINUX_UACCESS_H__ */