/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })


#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)

#define set_fs(x)							\
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

#define segment_eq(a,b) ((a).ar4 == (b).ar4)

static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_RELATIVE_EXTABLE

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

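/*
 * Illustrative sketch (compiled out): a hypothetical caller that validates
 * the user buffer with access_ok() before using the unchecked
 * __copy_from_user(), as the comment above requires.  The struct and
 * function names below are made up for the example.
 */
#if 0
struct demo_request {
	unsigned long addr;
	unsigned long len;
};

static int demo_fetch_request(struct demo_request *req,
			      const void __user *uptr)
{
	if (!access_ok(VERIFY_READ, uptr, sizeof(*req)))
		return -EFAULT;
	/* __copy_from_user() returns the number of bytes not copied. */
	if (__copy_from_user(req, uptr, sizeof(*req)))
		return -EFAULT;
	return 0;
}
#endif
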
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0: mvcos %1,%3,%2\n"				\
		"1: xr    %0,%0\n"				\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi   %0,%5\n"				\
		"   jg    2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "=Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x810000UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, spec);
		break;
	};
	return rc;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x81UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, spec);
		break;
	};
	return rc;
}

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

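/*
 * Both variants of __put_user_fn() and __get_user_fn() return 0 on success
 * and -EFAULT if the user-space access faults.  With z10 features the
 * transfer is a single MVCOS instruction whose faulting address is covered
 * by the exception table, so the out-of-line fixup supplies the -EFAULT;
 * the constant preloaded into register 0 is the MVCOS specification value,
 * and put and get use different constants so that the user-space operand
 * (the destination for put, the source for get) is the one accessed with
 * user address-space controls.  Without z10 features the helpers simply
 * fall back to __copy_to_user()/__copy_from_user().
 */
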
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user(to, from, n);
}

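/*
 * Illustrative sketch (compiled out): typical use of the single-value and
 * block transfer routines defined above.  All names below are hypothetical.
 */
#if 0
static int demo_roundtrip(int __user *uval, void __user *ubuf,
			  const void *kbuf, unsigned long len)
{
	int val;

	/* get_user()/put_user() evaluate to 0 or -EFAULT. */
	if (get_user(val, uval))
		return -EFAULT;
	if (put_user(val + 1, uval))
		return -EFAULT;
	/* copy_to_user() returns the number of bytes left uncopied. */
	return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
}
#endif
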
void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();
		return n;
	}
	return __copy_from_user(to, from, n);
}

unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}

/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */