#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned
 * so that we don't take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			__uaccess_end();
			return ret;
		case 8:
			__uaccess_begin();
			__put_user_size(*(u64 *)from, (u64 __user *)to,
					8, ret, 8);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
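
/*
 * A minimal usage sketch, assuming the caller has already done the
 * access_ok() check (the example_* name is hypothetical, not kernel API):
 * the _inatomic variant is for contexts that must not take a page fault,
 * so it is typically bracketed by pagefault_disable()/pagefault_enable().
 *
 *	static int example_poke_user(u32 __user *uaddr, u32 val)
 *	{
 *		unsigned long left;
 *
 *		if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
 *			return -EFAULT;
 *		pagefault_disable();
 *		left = __copy_to_user_inatomic(uaddr, &val, sizeof(val));
 *		pagefault_enable();
 *		return left ? -EFAULT : 0;
 *	}
 */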

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
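
/*
 * Usage sketch, assuming a sleeping context (the example_* name is
 * hypothetical): the caller performs the access_ok() range check itself,
 * which is what distinguishes this from plain copy_to_user().
 *
 *	static int example_copy_out(void __user *dst, const void *src,
 *				    unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, dst, len))
 *			return -EFAULT;
 *		return __copy_to_user(dst, src, len) ? -EFAULT : 0;
 *	}
 */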

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/*
	 * Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
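
/*
 * Usage sketch, assuming an atomic section (illustration only): on a
 * partial copy the destination tail is NOT zeroed, so the caller must
 * either discard the buffer or retry with faults enabled, where the
 * retry with copy_from_user() may sleep.
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kbuf, uaddr, len);
 *	pagefault_enable();
 *	if (unlikely(left))
 *		left = copy_from_user(kbuf, uaddr, len);
 */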

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for an explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
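
/*
 * Usage sketch (the example_* name is hypothetical): with the range
 * already checked, a short read fills the uncopied tail of the kernel
 * buffer with zeroes, so the buffer never leaks stale kernel data.
 *
 *	static int example_copy_in(void *kbuf, const void __user *uaddr,
 *				   unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, uaddr, len))
 *			return -EFAULT;
 *		return __copy_from_user(kbuf, uaddr, len) ? -EFAULT : 0;
 *	}
 */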

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
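
/*
 * The _nocache variants are intended for data that will not be read
 * again soon: for non-constant sizes they go through
 * __copy_from_user_ll_nocache*(), which may use a cache-avoiding copy,
 * while the constant 1/2/4-byte cases above still take the ordinary
 * __get_user_size() fast path.  A minimal sketch, assuming a sleeping
 * context (the example_* name is hypothetical):
 *
 *	static int example_copy_in_nocache(void *kbuf,
 *					   const void __user *uaddr,
 *					   unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, uaddr, len))
 *			return -EFAULT;
 *		return __copy_from_user_nocache(kbuf, uaddr, len) ?
 *			-EFAULT : 0;
 *	}
 */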

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */