/* xref: /openbmc/linux/arch/x86/include/asm/uaccess_32.h (revision 23c2b932) */
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that the copy does not page fault and sleep.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	return __copy_to_user_ll(to, from, n);
}
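
/*
 * Example (illustrative sketch): copying a small, already-validated value
 * out to user space from a context that must not sleep.  The caller and
 * its arguments below are hypothetical; it assumes access_ok() was done
 * on @to beforehand and that the destination page is already faulted in,
 * as required above.
 *
 *	static int copy_out_atomic(void __user *to, const u32 *val)
 *	{
 *		unsigned long left;
 *
 *		pagefault_disable();		// a fault here must not sleep
 *		left = __copy_to_user_inatomic(to, val, sizeof(*val));
 *		pagefault_enable();
 *
 *		return left ? -EFAULT : 0;	// non-zero: bytes not copied
 *	}
 */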

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
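
/*
 * Example (illustrative): a typical caller validates the whole range with
 * access_ok() once and then uses this double-underscore variant.  The
 * function and its parameters are hypothetical; access_ok() is shown in
 * the three-argument form used at this revision of the tree.
 *
 *	static int fill_user_buffer(void __user *ubuf, const void *kbuf,
 *				    unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *
 *		// __copy_to_user() returns the number of bytes not copied
 *		if (__copy_to_user(ubuf, kbuf, len))
 *			return -EFAULT;
 *
 *		return 0;
 *	}
 */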

/*
 * Atomic-context variant of __copy_from_user(): may be called with
 * pagefaults disabled and, unlike __copy_from_user(), does not zero-pad
 * the destination when the copy is partial (see the comment below).
 */
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	return __copy_from_user_ll_nozero(to, from, n);
}

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
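
/*
 * Example (illustrative): when the length is a compile-time constant of
 * 1, 2 or 4, the switch above collapses the copy into a single inlined
 * __get_user_size() access instead of a call to __copy_from_user_ll().
 * read_user_u32() below is a hypothetical caller relying on that path.
 *
 *	static int read_user_u32(u32 *dst, const u32 __user *src)
 *	{
 *		if (!access_ok(VERIFY_READ, src, sizeof(*src)))
 *			return -EFAULT;
 *
 *		// sizeof(*dst) == 4, so this hits the "case 4" branch above
 *		return __copy_from_user(dst, src, sizeof(*dst)) ? -EFAULT : 0;
 *	}
 */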

/*
 * Like __copy_from_user(), but the bulk copy avoids polluting the CPU
 * caches (non-temporal stores on the __copy_from_user_ll_nocache() path);
 * small constant sizes of 1, 2 or 4 bytes still use the ordinary
 * __get_user_size() fast path.
 */
static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
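
/*
 * Example (illustrative): the nocache variant suits large data the kernel
 * will not read again soon, e.g. a payload that is only staged and handed
 * on to a device.  stage_payload() and its arguments are hypothetical.
 *
 *	static int stage_payload(void *bounce, const void __user *ubuf,
 *				 unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_READ, ubuf, len))
 *			return -EFAULT;
 *
 *		// returns the number of bytes that could not be copied
 *		return __copy_from_user_nocache(bounce, ubuf, len) ? -EFAULT : 0;
 *	}
 */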

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */