1 /* 2 * User address space access functions. 3 * The non inlined parts of asm-i386/uaccess.h are here. 4 * 5 * Copyright 1997 Andi Kleen <ak@muc.de> 6 * Copyright 1997 Linus Torvalds 7 */ 8 #include <linux/mm.h> 9 #include <linux/highmem.h> 10 #include <linux/blkdev.h> 11 #include <linux/module.h> 12 #include <linux/backing-dev.h> 13 #include <linux/interrupt.h> 14 #include <asm/uaccess.h> 15 #include <asm/mmx.h> 16 17 static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) 18 { 19 #ifdef CONFIG_X86_INTEL_USERCOPY 20 if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask)) 21 return 0; 22 #endif 23 return 1; 24 } 25 #define movsl_is_ok(a1, a2, n) \ 26 __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) 27 28 /* 29 * Copy a null terminated string from userspace. 30 */ 31 32 #define __do_strncpy_from_user(dst, src, count, res) \ 33 do { \ 34 int __d0, __d1, __d2; \ 35 might_sleep(); \ 36 __asm__ __volatile__( \ 37 " testl %1,%1\n" \ 38 " jz 2f\n" \ 39 "0: lodsb\n" \ 40 " stosb\n" \ 41 " testb %%al,%%al\n" \ 42 " jz 1f\n" \ 43 " decl %1\n" \ 44 " jnz 0b\n" \ 45 "1: subl %1,%0\n" \ 46 "2:\n" \ 47 ".section .fixup,\"ax\"\n" \ 48 "3: movl %5,%0\n" \ 49 " jmp 2b\n" \ 50 ".previous\n" \ 51 _ASM_EXTABLE(0b,3b) \ 52 : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ 53 "=&D" (__d2) \ 54 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ 55 : "memory"); \ 56 } while (0) 57 58 /** 59 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. 60 * @dst: Destination address, in kernel space. This buffer must be at 61 * least @count bytes long. 62 * @src: Source address, in user space. 63 * @count: Maximum number of bytes to copy, including the trailing NUL. 64 * 65 * Copies a NUL-terminated string from userspace to kernel space. 66 * Caller must check the specified block with access_ok() before calling 67 * this function. 68 * 69 * On success, returns the length of the string (not including the trailing 70 * NUL). 71 * 72 * If access to userspace fails, returns -EFAULT (some data may have been 73 * copied). 74 * 75 * If @count is smaller than the length of the string, copies @count bytes 76 * and returns @count. 77 */ 78 long 79 __strncpy_from_user(char *dst, const char __user *src, long count) 80 { 81 long res; 82 __do_strncpy_from_user(dst, src, count, res); 83 return res; 84 } 85 EXPORT_SYMBOL(__strncpy_from_user); 86 87 /** 88 * strncpy_from_user: - Copy a NUL terminated string from userspace. 89 * @dst: Destination address, in kernel space. This buffer must be at 90 * least @count bytes long. 91 * @src: Source address, in user space. 92 * @count: Maximum number of bytes to copy, including the trailing NUL. 93 * 94 * Copies a NUL-terminated string from userspace to kernel space. 95 * 96 * On success, returns the length of the string (not including the trailing 97 * NUL). 98 * 99 * If access to userspace fails, returns -EFAULT (some data may have been 100 * copied). 101 * 102 * If @count is smaller than the length of the string, copies @count bytes 103 * and returns @count. 104 */ 105 long 106 strncpy_from_user(char *dst, const char __user *src, long count) 107 { 108 long res = -EFAULT; 109 if (access_ok(VERIFY_READ, src, 1)) 110 __do_strncpy_from_user(dst, src, count, res); 111 return res; 112 } 113 EXPORT_SYMBOL(strncpy_from_user); 114 115 /* 116 * Zero Userspace 117 */ 118 119 #define __do_clear_user(addr,size) \ 120 do { \ 121 int __d0; \ 122 might_sleep(); \ 123 __asm__ __volatile__( \ 124 "0: rep; stosl\n" \ 125 " movl %2,%0\n" \ 126 "1: rep; stosb\n" \ 127 "2:\n" \ 128 ".section .fixup,\"ax\"\n" \ 129 "3: lea 0(%2,%0,4),%0\n" \ 130 " jmp 2b\n" \ 131 ".previous\n" \ 132 _ASM_EXTABLE(0b,3b) \ 133 _ASM_EXTABLE(1b,2b) \ 134 : "=&c"(size), "=&D" (__d0) \ 135 : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \ 136 } while (0) 137 138 /** 139 * clear_user: - Zero a block of memory in user space. 140 * @to: Destination address, in user space. 141 * @n: Number of bytes to zero. 142 * 143 * Zero a block of memory in user space. 144 * 145 * Returns number of bytes that could not be cleared. 146 * On success, this will be zero. 147 */ 148 unsigned long 149 clear_user(void __user *to, unsigned long n) 150 { 151 might_sleep(); 152 if (access_ok(VERIFY_WRITE, to, n)) 153 __do_clear_user(to, n); 154 return n; 155 } 156 EXPORT_SYMBOL(clear_user); 157 158 /** 159 * __clear_user: - Zero a block of memory in user space, with less checking. 160 * @to: Destination address, in user space. 161 * @n: Number of bytes to zero. 162 * 163 * Zero a block of memory in user space. Caller must check 164 * the specified block with access_ok() before calling this function. 165 * 166 * Returns number of bytes that could not be cleared. 167 * On success, this will be zero. 168 */ 169 unsigned long 170 __clear_user(void __user *to, unsigned long n) 171 { 172 __do_clear_user(to, n); 173 return n; 174 } 175 EXPORT_SYMBOL(__clear_user); 176 177 /** 178 * strnlen_user: - Get the size of a string in user space. 179 * @s: The string to measure. 180 * @n: The maximum valid length 181 * 182 * Get the size of a NUL-terminated string in user space. 183 * 184 * Returns the size of the string INCLUDING the terminating NUL. 185 * On exception, returns 0. 186 * If the string is too long, returns a value greater than @n. 187 */ 188 long strnlen_user(const char __user *s, long n) 189 { 190 unsigned long mask = -__addr_ok(s); 191 unsigned long res, tmp; 192 193 might_sleep(); 194 195 __asm__ __volatile__( 196 " testl %0, %0\n" 197 " jz 3f\n" 198 " andl %0,%%ecx\n" 199 "0: repne; scasb\n" 200 " setne %%al\n" 201 " subl %%ecx,%0\n" 202 " addl %0,%%eax\n" 203 "1:\n" 204 ".section .fixup,\"ax\"\n" 205 "2: xorl %%eax,%%eax\n" 206 " jmp 1b\n" 207 "3: movb $1,%%al\n" 208 " jmp 1b\n" 209 ".previous\n" 210 ".section __ex_table,\"a\"\n" 211 " .align 4\n" 212 " .long 0b,2b\n" 213 ".previous" 214 :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp) 215 :"0" (n), "1" (s), "2" (0), "3" (mask) 216 :"cc"); 217 return res & mask; 218 } 219 EXPORT_SYMBOL(strnlen_user); 220 221 #ifdef CONFIG_X86_INTEL_USERCOPY 222 static unsigned long 223 __copy_user_intel(void __user *to, const void *from, unsigned long size) 224 { 225 int d0, d1; 226 __asm__ __volatile__( 227 " .align 2,0x90\n" 228 "1: movl 32(%4), %%eax\n" 229 " cmpl $67, %0\n" 230 " jbe 3f\n" 231 "2: movl 64(%4), %%eax\n" 232 " .align 2,0x90\n" 233 "3: movl 0(%4), %%eax\n" 234 "4: movl 4(%4), %%edx\n" 235 "5: movl %%eax, 0(%3)\n" 236 "6: movl %%edx, 4(%3)\n" 237 "7: movl 8(%4), %%eax\n" 238 "8: movl 12(%4),%%edx\n" 239 "9: movl %%eax, 8(%3)\n" 240 "10: movl %%edx, 12(%3)\n" 241 "11: movl 16(%4), %%eax\n" 242 "12: movl 20(%4), %%edx\n" 243 "13: movl %%eax, 16(%3)\n" 244 "14: movl %%edx, 20(%3)\n" 245 "15: movl 24(%4), %%eax\n" 246 "16: movl 28(%4), %%edx\n" 247 "17: movl %%eax, 24(%3)\n" 248 "18: movl %%edx, 28(%3)\n" 249 "19: movl 32(%4), %%eax\n" 250 "20: movl 36(%4), %%edx\n" 251 "21: movl %%eax, 32(%3)\n" 252 "22: movl %%edx, 36(%3)\n" 253 "23: movl 40(%4), %%eax\n" 254 "24: movl 44(%4), %%edx\n" 255 "25: movl %%eax, 40(%3)\n" 256 "26: movl %%edx, 44(%3)\n" 257 "27: movl 48(%4), %%eax\n" 258 "28: movl 52(%4), %%edx\n" 259 "29: movl %%eax, 48(%3)\n" 260 "30: movl %%edx, 52(%3)\n" 261 "31: movl 56(%4), %%eax\n" 262 "32: movl 60(%4), %%edx\n" 263 "33: movl %%eax, 56(%3)\n" 264 "34: movl %%edx, 60(%3)\n" 265 " addl $-64, %0\n" 266 " addl $64, %4\n" 267 " addl $64, %3\n" 268 " cmpl $63, %0\n" 269 " ja 1b\n" 270 "35: movl %0, %%eax\n" 271 " shrl $2, %0\n" 272 " andl $3, %%eax\n" 273 " cld\n" 274 "99: rep; movsl\n" 275 "36: movl %%eax, %0\n" 276 "37: rep; movsb\n" 277 "100:\n" 278 ".section .fixup,\"ax\"\n" 279 "101: lea 0(%%eax,%0,4),%0\n" 280 " jmp 100b\n" 281 ".previous\n" 282 ".section __ex_table,\"a\"\n" 283 " .align 4\n" 284 " .long 1b,100b\n" 285 " .long 2b,100b\n" 286 " .long 3b,100b\n" 287 " .long 4b,100b\n" 288 " .long 5b,100b\n" 289 " .long 6b,100b\n" 290 " .long 7b,100b\n" 291 " .long 8b,100b\n" 292 " .long 9b,100b\n" 293 " .long 10b,100b\n" 294 " .long 11b,100b\n" 295 " .long 12b,100b\n" 296 " .long 13b,100b\n" 297 " .long 14b,100b\n" 298 " .long 15b,100b\n" 299 " .long 16b,100b\n" 300 " .long 17b,100b\n" 301 " .long 18b,100b\n" 302 " .long 19b,100b\n" 303 " .long 20b,100b\n" 304 " .long 21b,100b\n" 305 " .long 22b,100b\n" 306 " .long 23b,100b\n" 307 " .long 24b,100b\n" 308 " .long 25b,100b\n" 309 " .long 26b,100b\n" 310 " .long 27b,100b\n" 311 " .long 28b,100b\n" 312 " .long 29b,100b\n" 313 " .long 30b,100b\n" 314 " .long 31b,100b\n" 315 " .long 32b,100b\n" 316 " .long 33b,100b\n" 317 " .long 34b,100b\n" 318 " .long 35b,100b\n" 319 " .long 36b,100b\n" 320 " .long 37b,100b\n" 321 " .long 99b,101b\n" 322 ".previous" 323 : "=&c"(size), "=&D" (d0), "=&S" (d1) 324 : "1"(to), "2"(from), "0"(size) 325 : "eax", "edx", "memory"); 326 return size; 327 } 328 329 static unsigned long 330 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) 331 { 332 int d0, d1; 333 __asm__ __volatile__( 334 " .align 2,0x90\n" 335 "0: movl 32(%4), %%eax\n" 336 " cmpl $67, %0\n" 337 " jbe 2f\n" 338 "1: movl 64(%4), %%eax\n" 339 " .align 2,0x90\n" 340 "2: movl 0(%4), %%eax\n" 341 "21: movl 4(%4), %%edx\n" 342 " movl %%eax, 0(%3)\n" 343 " movl %%edx, 4(%3)\n" 344 "3: movl 8(%4), %%eax\n" 345 "31: movl 12(%4),%%edx\n" 346 " movl %%eax, 8(%3)\n" 347 " movl %%edx, 12(%3)\n" 348 "4: movl 16(%4), %%eax\n" 349 "41: movl 20(%4), %%edx\n" 350 " movl %%eax, 16(%3)\n" 351 " movl %%edx, 20(%3)\n" 352 "10: movl 24(%4), %%eax\n" 353 "51: movl 28(%4), %%edx\n" 354 " movl %%eax, 24(%3)\n" 355 " movl %%edx, 28(%3)\n" 356 "11: movl 32(%4), %%eax\n" 357 "61: movl 36(%4), %%edx\n" 358 " movl %%eax, 32(%3)\n" 359 " movl %%edx, 36(%3)\n" 360 "12: movl 40(%4), %%eax\n" 361 "71: movl 44(%4), %%edx\n" 362 " movl %%eax, 40(%3)\n" 363 " movl %%edx, 44(%3)\n" 364 "13: movl 48(%4), %%eax\n" 365 "81: movl 52(%4), %%edx\n" 366 " movl %%eax, 48(%3)\n" 367 " movl %%edx, 52(%3)\n" 368 "14: movl 56(%4), %%eax\n" 369 "91: movl 60(%4), %%edx\n" 370 " movl %%eax, 56(%3)\n" 371 " movl %%edx, 60(%3)\n" 372 " addl $-64, %0\n" 373 " addl $64, %4\n" 374 " addl $64, %3\n" 375 " cmpl $63, %0\n" 376 " ja 0b\n" 377 "5: movl %0, %%eax\n" 378 " shrl $2, %0\n" 379 " andl $3, %%eax\n" 380 " cld\n" 381 "6: rep; movsl\n" 382 " movl %%eax,%0\n" 383 "7: rep; movsb\n" 384 "8:\n" 385 ".section .fixup,\"ax\"\n" 386 "9: lea 0(%%eax,%0,4),%0\n" 387 "16: pushl %0\n" 388 " pushl %%eax\n" 389 " xorl %%eax,%%eax\n" 390 " rep; stosb\n" 391 " popl %%eax\n" 392 " popl %0\n" 393 " jmp 8b\n" 394 ".previous\n" 395 ".section __ex_table,\"a\"\n" 396 " .align 4\n" 397 " .long 0b,16b\n" 398 " .long 1b,16b\n" 399 " .long 2b,16b\n" 400 " .long 21b,16b\n" 401 " .long 3b,16b\n" 402 " .long 31b,16b\n" 403 " .long 4b,16b\n" 404 " .long 41b,16b\n" 405 " .long 10b,16b\n" 406 " .long 51b,16b\n" 407 " .long 11b,16b\n" 408 " .long 61b,16b\n" 409 " .long 12b,16b\n" 410 " .long 71b,16b\n" 411 " .long 13b,16b\n" 412 " .long 81b,16b\n" 413 " .long 14b,16b\n" 414 " .long 91b,16b\n" 415 " .long 6b,9b\n" 416 " .long 7b,16b\n" 417 ".previous" 418 : "=&c"(size), "=&D" (d0), "=&S" (d1) 419 : "1"(to), "2"(from), "0"(size) 420 : "eax", "edx", "memory"); 421 return size; 422 } 423 424 /* 425 * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware. 426 * hyoshiok@miraclelinux.com 427 */ 428 429 static unsigned long __copy_user_zeroing_intel_nocache(void *to, 430 const void __user *from, unsigned long size) 431 { 432 int d0, d1; 433 434 __asm__ __volatile__( 435 " .align 2,0x90\n" 436 "0: movl 32(%4), %%eax\n" 437 " cmpl $67, %0\n" 438 " jbe 2f\n" 439 "1: movl 64(%4), %%eax\n" 440 " .align 2,0x90\n" 441 "2: movl 0(%4), %%eax\n" 442 "21: movl 4(%4), %%edx\n" 443 " movnti %%eax, 0(%3)\n" 444 " movnti %%edx, 4(%3)\n" 445 "3: movl 8(%4), %%eax\n" 446 "31: movl 12(%4),%%edx\n" 447 " movnti %%eax, 8(%3)\n" 448 " movnti %%edx, 12(%3)\n" 449 "4: movl 16(%4), %%eax\n" 450 "41: movl 20(%4), %%edx\n" 451 " movnti %%eax, 16(%3)\n" 452 " movnti %%edx, 20(%3)\n" 453 "10: movl 24(%4), %%eax\n" 454 "51: movl 28(%4), %%edx\n" 455 " movnti %%eax, 24(%3)\n" 456 " movnti %%edx, 28(%3)\n" 457 "11: movl 32(%4), %%eax\n" 458 "61: movl 36(%4), %%edx\n" 459 " movnti %%eax, 32(%3)\n" 460 " movnti %%edx, 36(%3)\n" 461 "12: movl 40(%4), %%eax\n" 462 "71: movl 44(%4), %%edx\n" 463 " movnti %%eax, 40(%3)\n" 464 " movnti %%edx, 44(%3)\n" 465 "13: movl 48(%4), %%eax\n" 466 "81: movl 52(%4), %%edx\n" 467 " movnti %%eax, 48(%3)\n" 468 " movnti %%edx, 52(%3)\n" 469 "14: movl 56(%4), %%eax\n" 470 "91: movl 60(%4), %%edx\n" 471 " movnti %%eax, 56(%3)\n" 472 " movnti %%edx, 60(%3)\n" 473 " addl $-64, %0\n" 474 " addl $64, %4\n" 475 " addl $64, %3\n" 476 " cmpl $63, %0\n" 477 " ja 0b\n" 478 " sfence \n" 479 "5: movl %0, %%eax\n" 480 " shrl $2, %0\n" 481 " andl $3, %%eax\n" 482 " cld\n" 483 "6: rep; movsl\n" 484 " movl %%eax,%0\n" 485 "7: rep; movsb\n" 486 "8:\n" 487 ".section .fixup,\"ax\"\n" 488 "9: lea 0(%%eax,%0,4),%0\n" 489 "16: pushl %0\n" 490 " pushl %%eax\n" 491 " xorl %%eax,%%eax\n" 492 " rep; stosb\n" 493 " popl %%eax\n" 494 " popl %0\n" 495 " jmp 8b\n" 496 ".previous\n" 497 ".section __ex_table,\"a\"\n" 498 " .align 4\n" 499 " .long 0b,16b\n" 500 " .long 1b,16b\n" 501 " .long 2b,16b\n" 502 " .long 21b,16b\n" 503 " .long 3b,16b\n" 504 " .long 31b,16b\n" 505 " .long 4b,16b\n" 506 " .long 41b,16b\n" 507 " .long 10b,16b\n" 508 " .long 51b,16b\n" 509 " .long 11b,16b\n" 510 " .long 61b,16b\n" 511 " .long 12b,16b\n" 512 " .long 71b,16b\n" 513 " .long 13b,16b\n" 514 " .long 81b,16b\n" 515 " .long 14b,16b\n" 516 " .long 91b,16b\n" 517 " .long 6b,9b\n" 518 " .long 7b,16b\n" 519 ".previous" 520 : "=&c"(size), "=&D" (d0), "=&S" (d1) 521 : "1"(to), "2"(from), "0"(size) 522 : "eax", "edx", "memory"); 523 return size; 524 } 525 526 static unsigned long __copy_user_intel_nocache(void *to, 527 const void __user *from, unsigned long size) 528 { 529 int d0, d1; 530 531 __asm__ __volatile__( 532 " .align 2,0x90\n" 533 "0: movl 32(%4), %%eax\n" 534 " cmpl $67, %0\n" 535 " jbe 2f\n" 536 "1: movl 64(%4), %%eax\n" 537 " .align 2,0x90\n" 538 "2: movl 0(%4), %%eax\n" 539 "21: movl 4(%4), %%edx\n" 540 " movnti %%eax, 0(%3)\n" 541 " movnti %%edx, 4(%3)\n" 542 "3: movl 8(%4), %%eax\n" 543 "31: movl 12(%4),%%edx\n" 544 " movnti %%eax, 8(%3)\n" 545 " movnti %%edx, 12(%3)\n" 546 "4: movl 16(%4), %%eax\n" 547 "41: movl 20(%4), %%edx\n" 548 " movnti %%eax, 16(%3)\n" 549 " movnti %%edx, 20(%3)\n" 550 "10: movl 24(%4), %%eax\n" 551 "51: movl 28(%4), %%edx\n" 552 " movnti %%eax, 24(%3)\n" 553 " movnti %%edx, 28(%3)\n" 554 "11: movl 32(%4), %%eax\n" 555 "61: movl 36(%4), %%edx\n" 556 " movnti %%eax, 32(%3)\n" 557 " movnti %%edx, 36(%3)\n" 558 "12: movl 40(%4), %%eax\n" 559 "71: movl 44(%4), %%edx\n" 560 " movnti %%eax, 40(%3)\n" 561 " movnti %%edx, 44(%3)\n" 562 "13: movl 48(%4), %%eax\n" 563 "81: movl 52(%4), %%edx\n" 564 " movnti %%eax, 48(%3)\n" 565 " movnti %%edx, 52(%3)\n" 566 "14: movl 56(%4), %%eax\n" 567 "91: movl 60(%4), %%edx\n" 568 " movnti %%eax, 56(%3)\n" 569 " movnti %%edx, 60(%3)\n" 570 " addl $-64, %0\n" 571 " addl $64, %4\n" 572 " addl $64, %3\n" 573 " cmpl $63, %0\n" 574 " ja 0b\n" 575 " sfence \n" 576 "5: movl %0, %%eax\n" 577 " shrl $2, %0\n" 578 " andl $3, %%eax\n" 579 " cld\n" 580 "6: rep; movsl\n" 581 " movl %%eax,%0\n" 582 "7: rep; movsb\n" 583 "8:\n" 584 ".section .fixup,\"ax\"\n" 585 "9: lea 0(%%eax,%0,4),%0\n" 586 "16: jmp 8b\n" 587 ".previous\n" 588 ".section __ex_table,\"a\"\n" 589 " .align 4\n" 590 " .long 0b,16b\n" 591 " .long 1b,16b\n" 592 " .long 2b,16b\n" 593 " .long 21b,16b\n" 594 " .long 3b,16b\n" 595 " .long 31b,16b\n" 596 " .long 4b,16b\n" 597 " .long 41b,16b\n" 598 " .long 10b,16b\n" 599 " .long 51b,16b\n" 600 " .long 11b,16b\n" 601 " .long 61b,16b\n" 602 " .long 12b,16b\n" 603 " .long 71b,16b\n" 604 " .long 13b,16b\n" 605 " .long 81b,16b\n" 606 " .long 14b,16b\n" 607 " .long 91b,16b\n" 608 " .long 6b,9b\n" 609 " .long 7b,16b\n" 610 ".previous" 611 : "=&c"(size), "=&D" (d0), "=&S" (d1) 612 : "1"(to), "2"(from), "0"(size) 613 : "eax", "edx", "memory"); 614 return size; 615 } 616 617 #else 618 619 /* 620 * Leave these declared but undefined. They should not be any references to 621 * them 622 */ 623 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, 624 unsigned long size); 625 unsigned long __copy_user_intel(void __user *to, const void *from, 626 unsigned long size); 627 unsigned long __copy_user_zeroing_intel_nocache(void *to, 628 const void __user *from, unsigned long size); 629 #endif /* CONFIG_X86_INTEL_USERCOPY */ 630 631 /* Generic arbitrary sized copy. */ 632 #define __copy_user(to, from, size) \ 633 do { \ 634 int __d0, __d1, __d2; \ 635 __asm__ __volatile__( \ 636 " cmp $7,%0\n" \ 637 " jbe 1f\n" \ 638 " movl %1,%0\n" \ 639 " negl %0\n" \ 640 " andl $7,%0\n" \ 641 " subl %0,%3\n" \ 642 "4: rep; movsb\n" \ 643 " movl %3,%0\n" \ 644 " shrl $2,%0\n" \ 645 " andl $3,%3\n" \ 646 " .align 2,0x90\n" \ 647 "0: rep; movsl\n" \ 648 " movl %3,%0\n" \ 649 "1: rep; movsb\n" \ 650 "2:\n" \ 651 ".section .fixup,\"ax\"\n" \ 652 "5: addl %3,%0\n" \ 653 " jmp 2b\n" \ 654 "3: lea 0(%3,%0,4),%0\n" \ 655 " jmp 2b\n" \ 656 ".previous\n" \ 657 ".section __ex_table,\"a\"\n" \ 658 " .align 4\n" \ 659 " .long 4b,5b\n" \ 660 " .long 0b,3b\n" \ 661 " .long 1b,2b\n" \ 662 ".previous" \ 663 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ 664 : "3"(size), "0"(size), "1"(to), "2"(from) \ 665 : "memory"); \ 666 } while (0) 667 668 #define __copy_user_zeroing(to, from, size) \ 669 do { \ 670 int __d0, __d1, __d2; \ 671 __asm__ __volatile__( \ 672 " cmp $7,%0\n" \ 673 " jbe 1f\n" \ 674 " movl %1,%0\n" \ 675 " negl %0\n" \ 676 " andl $7,%0\n" \ 677 " subl %0,%3\n" \ 678 "4: rep; movsb\n" \ 679 " movl %3,%0\n" \ 680 " shrl $2,%0\n" \ 681 " andl $3,%3\n" \ 682 " .align 2,0x90\n" \ 683 "0: rep; movsl\n" \ 684 " movl %3,%0\n" \ 685 "1: rep; movsb\n" \ 686 "2:\n" \ 687 ".section .fixup,\"ax\"\n" \ 688 "5: addl %3,%0\n" \ 689 " jmp 6f\n" \ 690 "3: lea 0(%3,%0,4),%0\n" \ 691 "6: pushl %0\n" \ 692 " pushl %%eax\n" \ 693 " xorl %%eax,%%eax\n" \ 694 " rep; stosb\n" \ 695 " popl %%eax\n" \ 696 " popl %0\n" \ 697 " jmp 2b\n" \ 698 ".previous\n" \ 699 ".section __ex_table,\"a\"\n" \ 700 " .align 4\n" \ 701 " .long 4b,5b\n" \ 702 " .long 0b,3b\n" \ 703 " .long 1b,6b\n" \ 704 ".previous" \ 705 : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ 706 : "3"(size), "0"(size), "1"(to), "2"(from) \ 707 : "memory"); \ 708 } while (0) 709 710 unsigned long __copy_to_user_ll(void __user *to, const void *from, 711 unsigned long n) 712 { 713 #ifndef CONFIG_X86_WP_WORKS_OK 714 if (unlikely(boot_cpu_data.wp_works_ok == 0) && 715 ((unsigned long)to) < TASK_SIZE) { 716 /* 717 * When we are in an atomic section (see 718 * mm/filemap.c:file_read_actor), return the full 719 * length to take the slow path. 720 */ 721 if (in_atomic()) 722 return n; 723 724 /* 725 * CPU does not honor the WP bit when writing 726 * from supervisory mode, and due to preemption or SMP, 727 * the page tables can change at any time. 728 * Do it manually. Manfred <manfred@colorfullife.com> 729 */ 730 while (n) { 731 unsigned long offset = ((unsigned long)to)%PAGE_SIZE; 732 unsigned long len = PAGE_SIZE - offset; 733 int retval; 734 struct page *pg; 735 void *maddr; 736 737 if (len > n) 738 len = n; 739 740 survive: 741 down_read(¤t->mm->mmap_sem); 742 retval = get_user_pages(current, current->mm, 743 (unsigned long)to, 1, 1, 0, &pg, NULL); 744 745 if (retval == -ENOMEM && is_global_init(current)) { 746 up_read(¤t->mm->mmap_sem); 747 congestion_wait(WRITE, HZ/50); 748 goto survive; 749 } 750 751 if (retval != 1) { 752 up_read(¤t->mm->mmap_sem); 753 break; 754 } 755 756 maddr = kmap_atomic(pg, KM_USER0); 757 memcpy(maddr + offset, from, len); 758 kunmap_atomic(maddr, KM_USER0); 759 set_page_dirty_lock(pg); 760 put_page(pg); 761 up_read(¤t->mm->mmap_sem); 762 763 from += len; 764 to += len; 765 n -= len; 766 } 767 return n; 768 } 769 #endif 770 if (movsl_is_ok(to, from, n)) 771 __copy_user(to, from, n); 772 else 773 n = __copy_user_intel(to, from, n); 774 return n; 775 } 776 EXPORT_SYMBOL(__copy_to_user_ll); 777 778 unsigned long __copy_from_user_ll(void *to, const void __user *from, 779 unsigned long n) 780 { 781 if (movsl_is_ok(to, from, n)) 782 __copy_user_zeroing(to, from, n); 783 else 784 n = __copy_user_zeroing_intel(to, from, n); 785 return n; 786 } 787 EXPORT_SYMBOL(__copy_from_user_ll); 788 789 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, 790 unsigned long n) 791 { 792 if (movsl_is_ok(to, from, n)) 793 __copy_user(to, from, n); 794 else 795 n = __copy_user_intel((void __user *)to, 796 (const void *)from, n); 797 return n; 798 } 799 EXPORT_SYMBOL(__copy_from_user_ll_nozero); 800 801 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, 802 unsigned long n) 803 { 804 #ifdef CONFIG_X86_INTEL_USERCOPY 805 if (n > 64 && cpu_has_xmm2) 806 n = __copy_user_zeroing_intel_nocache(to, from, n); 807 else 808 __copy_user_zeroing(to, from, n); 809 #else 810 __copy_user_zeroing(to, from, n); 811 #endif 812 return n; 813 } 814 EXPORT_SYMBOL(__copy_from_user_ll_nocache); 815 816 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, 817 unsigned long n) 818 { 819 #ifdef CONFIG_X86_INTEL_USERCOPY 820 if (n > 64 && cpu_has_xmm2) 821 n = __copy_user_intel_nocache(to, from, n); 822 else 823 __copy_user(to, from, n); 824 #else 825 __copy_user(to, from, n); 826 #endif 827 return n; 828 } 829 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); 830 831 /** 832 * copy_to_user: - Copy a block of data into user space. 833 * @to: Destination address, in user space. 834 * @from: Source address, in kernel space. 835 * @n: Number of bytes to copy. 836 * 837 * Context: User context only. This function may sleep. 838 * 839 * Copy data from kernel space to user space. 840 * 841 * Returns number of bytes that could not be copied. 842 * On success, this will be zero. 843 */ 844 unsigned long 845 copy_to_user(void __user *to, const void *from, unsigned long n) 846 { 847 if (access_ok(VERIFY_WRITE, to, n)) 848 n = __copy_to_user(to, from, n); 849 return n; 850 } 851 EXPORT_SYMBOL(copy_to_user); 852 853 /** 854 * copy_from_user: - Copy a block of data from user space. 855 * @to: Destination address, in kernel space. 856 * @from: Source address, in user space. 857 * @n: Number of bytes to copy. 858 * 859 * Context: User context only. This function may sleep. 860 * 861 * Copy data from user space to kernel space. 862 * 863 * Returns number of bytes that could not be copied. 864 * On success, this will be zero. 865 * 866 * If some data could not be copied, this function will pad the copied 867 * data to the requested size using zero bytes. 868 */ 869 unsigned long 870 copy_from_user(void *to, const void __user *from, unsigned long n) 871 { 872 if (access_ok(VERIFY_READ, from, n)) 873 n = __copy_from_user(to, from, n); 874 else 875 memset(to, 0, n); 876 return n; 877 } 878 EXPORT_SYMBOL(copy_from_user); 879