/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })


#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)

#define set_fs(x)							\
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

#define segment_eq(a,b) ((a).ar4 == (b).ar4)

static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
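
/*
 * Illustrative sketch (not part of this header's API; function and
 * struct names are hypothetical): how callers typically consume the
 * "bytes not copied" return convention documented above.
 *
 *	static long example_fetch_req(struct example_req *req,
 *				      const void __user *ubuf)
 *	{
 *		// Non-zero means a fault occurred; the uncopied tail of
 *		// *req has already been zero-padded by __copy_from_user().
 *		if (__copy_from_user(req, ubuf, sizeof(*req)))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * The double-underscore variants assume the caller has validated the
 * range with access_ok() first; on s390 that check always succeeds
 * (see __range_ok() above), but portable callers should still do it.
 */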
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
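
/*
 * Illustrative sketch (hypothetical names): typical use of the
 * single-value transfer macros above.  get_user()/put_user() return 0
 * on success and -EFAULT on fault, and only handle objects of 1, 2, 4
 * or 8 bytes; any other size ends up in __get_user_bad()/__put_user_bad().
 *
 *	static int example_double_value(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))	// may sleep (might_fault())
 *			return -EFAULT;
 *		return put_user(val * 2, uptr);
 *	}
 */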
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user(to, from, n);
}

void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();
		return n;
	}
	return __copy_from_user(to, from, n);
}

unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}

/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

int copy_to_user_real(void __user *dest, void *src, unsigned long count);

#endif /* __S390_UACCESS_H */
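
/*
 * Illustrative sketch (hypothetical names): the checked copy_from_user()
 * above also compares @n against the compile-time size of the destination
 * object, so the common ioctl-style pattern below benefits from the
 * copy_from_user_overflow() diagnostics when
 * CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is enabled.
 *
 *	struct example_args {
 *		__u32 flags;
 *		__u64 addr;
 *	};
 *
 *	static long example_ioctl(unsigned long arg)
 *	{
 *		struct example_args a;
 *
 *		if (copy_from_user(&a, (void __user *)arg, sizeof(a)))
 *			return -EFAULT;
 *		// act on a.flags / a.addr ...
 *		return 0;
 *	}
 */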