#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that the copy does not page fault and sleep.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);
	return __copy_to_user_ll(to, from, n);
}

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	return __copy_from_user_ll_nozero(to, from, n);
}
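
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * a minimal caller of __copy_to_user(), assuming the three-argument
 * access_ok() of this kernel generation.  foo_read_stats, struct foo_stats
 * and ubuf are hypothetical names used only for the example; the return
 * value of __copy_to_user() is the number of bytes NOT copied, so any
 * non-zero result is treated as -EFAULT.
 *
 *	static int foo_read_stats(struct foo_stats __user *ubuf)
 *	{
 *		struct foo_stats stats = { .packets = 42 };
 *
 *		if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
 *			return -EFAULT;
 *		if (__copy_to_user(ubuf, &stats, sizeof(stats)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */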

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	check_object_size(to, n, false);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */
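
/*
 * Illustrative sketch (editorial addition, appended after the include guard
 * and not part of the original header): a possible caller of
 * __copy_from_user().  The caller performs the access_ok() check itself;
 * on a partial copy the destination is zero-padded and the number of
 * uncopied bytes is returned.  foo_set_config and struct foo_config are
 * hypothetical names used only for this example, and the three-argument
 * access_ok() matches the kernel generation this header comes from.
 *
 *	static int foo_set_config(const struct foo_config __user *ubuf)
 *	{
 *		struct foo_config cfg;
 *
 *		if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
 *			return -EFAULT;
 *		if (__copy_from_user(&cfg, ubuf, sizeof(cfg)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */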