/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: June 2010
 *  -__clear_user( ) called multiple times during elf load was byte loop
 *   converted to do as much word clear as possible.
 *
 * vineetg: Dec 2009
 *  -Hand crafted constant propagation for "constant" copy sizes
 *  -stock kernel shrunk by 33K at -O3
 *
 * vineetg: Sept 2009
 *  -Added option to (UN)inline copy_(to|from)_user to reduce code sz
 *  -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
 *  -Enabled when doing -Os
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H

#include <linux/sched.h>
#include <asm/errno.h>
#include <linux/string.h>	/* for generic string functions */


#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))

/*
 * Algorithmically, for __user_ok() we want to do:
 *	(start < TASK_SIZE) && (start+len < TASK_SIZE)
 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
 * emitted directly in code.
 *
 * This can however be rewritten as follows:
 *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
 *
 * Because it essentially checks if buffer end is within limit and @len is
 * non-negative, which implies that buffer start will be within limit too.
 *
 * The reason for rewriting is that, in the majority of cases, @len is a
 * compile-time constant, causing the first sub-expression to be compile-time
 * subsumed.
 *
 * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
 * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
 * would already have been done at this call site for __kernel_ok()
 *
 * (An open-coded equivalent is sketched right after the macros, purely for
 * illustration.)
 */
#define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
				 ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
				 likely(__user_ok((addr), (sz))))
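/*
 * Illustration only (never compiled): an open-coded equivalent of the
 * __user_ok() check above, assuming @addr/@sz are unsigned long and that
 * get_fs() returns the current addr_limit (TASK_SIZE for user threads):
 *
 *	static inline int example_user_range_ok(unsigned long addr,
 *						unsigned long sz)
 *	{
 *		return (sz <= TASK_SIZE) && (addr <= get_fs() - sz);
 *	}
 *
 * e.g. with TASK_SIZE = 0x6000_0000 and sz = 0x10, @addr is compared
 * against get_fs() - 0x10 held in a register, avoiding a large LIMM.
 */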
/*********** Single byte/hword/word copies ******************/

#define __get_user_fn(sz, u, k)					\
({								\
	long __ret = 0;	/* success by default */		\
	switch (sz) {						\
	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
	case 4: __arc_get_user_one(*(k), u, "ld", __ret);  break;	\
	case 8: __arc_get_user_one_64(*(k), u, __ret);     break;	\
	}							\
	__ret;							\
})

/*
 * Returns 0 on success, -EFAULT if not.
 * @ret already contains 0 - given that errors will be less likely
 * (hence +r asm constraint below).
 * In case of error, fixup code will make it -EFAULT
 */
#define __arc_get_user_one(dst, src, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"    %1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

#define __arc_get_user_one_64(dst, src, ret)	\
	__asm__ __volatile__(			\
	"1:	ld   %1,[%2]\n"			\
	"4:	ld  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

#define __put_user_fn(sz, u, k)					\
({								\
	long __ret = 0;	/* success by default */		\
	switch (sz) {						\
	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
	case 4: __arc_put_user_one(*(k), u, "st", __ret);  break;	\
	case 8: __arc_put_user_one_64(*(k), u, __ret);     break;	\
	}							\
	__ret;							\
})

#define __arc_put_user_one(src, dst, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"    %1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

#define __arc_put_user_one_64(src, dst, ret)	\
	__asm__ __volatile__(			\
	"1:	st   %1,[%2]\n"			\
	"4:	st  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))
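/*
 * Usage illustration only (not part of this header): the generic
 * __get_user()/__put_user() in asm-generic/uaccess.h dispatch on
 * sizeof(*(ptr)) and end up here roughly as below; the variable names
 * are made up for the sketch.
 *
 *	u32 val;
 *	long err = __get_user_fn(sizeof(val), user_ptr, &val);
 *	if (err)	// -EFAULT: the .fixup code ran, @val is not valid
 *		return err;
 *
 * The .word pairs emitted into __ex_table associate each potentially
 * faulting user access with its fixup label, which is how @ret becomes
 * -EFAULT on a fault instead of the task being killed.
 */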
static inline unsigned long
__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		__asm__ __volatile__ (
		"	mov.f   lp_count, %0		\n"
		"	lpnz 2f				\n"
		"1:	ldb.ab  %1, [%3, 1]		\n"
		"	stb.ab  %1, [%2, 1]		\n"
		"	sub     %0,%0,1			\n"
		"2:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"3:	j   2b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 3b			\n"
		"	.previous			\n"

		: "+r" (n),
		/*
		 * Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "lp_start", "lp_end", "memory");

		return n;
	}

	/*
	 * Hand-crafted constant propagation to reduce code sz of the
	 * laddered copy 16x,8,4,2,1
	 */
	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr   lp_count, %7,4		\n"
			"	lp    3f			\n"
			"1:	ld.ab   %3, [%2, 4]		\n"
			"11:	ld.ab   %4, [%2, 4]		\n"
			"12:	ld.ab   %5, [%2, 4]		\n"
			"13:	ld.ab   %6, [%2, 4]		\n"
			"	st.ab   %3, [%1, 4]		\n"
			"	st.ab   %4, [%1, 4]		\n"
			"	st.ab   %5, [%1, 4]		\n"
			"	st.ab   %6, [%1, 4]		\n"
			"	sub     %0,%0,16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"14:	ld.ab   %3, [%2,4]		\n"
			"15:	ld.ab   %4, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	st.ab   %4, [%1,4]		\n"
			"	sub     %0,%0,8			\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"16:	ld.ab   %3, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	sub     %0,%0,4			\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"17:	ldw.ab   %3, [%2,2]		\n"
			"	stw.ab   %3, [%1,2]		\n"
			"	sub      %0,%0,2		\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"18:	ldb.ab   %3, [%2,2]		\n"
			"	stb.ab   %3, [%1,2]		\n"
			"	sub      %0,%0,1		\n"
			"34:	; nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */

		__asm__ __volatile__(
		"	mov %0,%3			\n"
		"	lsr.f  lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz   3f			\n"
		"1:	ld.ab   %5, [%2, 4]		\n"
		"11:	ld.ab   %6, [%2, 4]		\n"
		"12:	ld.ab   %7, [%2, 4]		\n"
		"13:	ld.ab   %8, [%2, 4]		\n"
		"	st.ab   %5, [%1, 4]		\n"
		"	st.ab   %6, [%1, 4]		\n"
		"	st.ab   %7, [%1, 4]		\n"
		"	st.ab   %8, [%1, 4]		\n"
		"	sub     %0,%0,16		\n"
		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
		"	bz      34f			\n"
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"14:	ld.ab   %5, [%2,4]		\n"
		"15:	ld.ab   %6, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0,%0,8			\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"16:	ld.ab   %5, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0,%0,4			\n"
		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
		"17:	ldw.ab   %5, [%2,2]		\n"
		"	stw.ab   %5, [%1,2]		\n"
		"	sub.f   %0,%0,2			\n"
		"33:	bbit0   %3,0,34f		\n"
		"18:	ldb.ab   %5, [%2,1]		\n"  /* 1 byte left */
		"	stb.ab   %5, [%1,1]		\n"
		"	sub.f   %0,%0,1			\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}
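/*
 * Reference sketch only (never compiled): a plain-C rendering of the
 * laddered 16x/8/4/2/1 copy above, to make the asm easier to follow.
 * The real code additionally decrements @res as it goes, so a fault
 * part-way through returns the number of bytes NOT copied.
 *
 *	static inline void example_laddered_copy(u8 *to, const u8 *from,
 *						 unsigned long n)
 *	{
 *		while (n >= 16) {			// 16 bytes per loop
 *			memcpy(to, from, 16);
 *			to += 16; from += 16; n -= 16;
 *		}
 *		if (n & 8) { memcpy(to, from, 8); to += 8; from += 8; }
 *		if (n & 4) { memcpy(to, from, 4); to += 4; from += 4; }
 *		if (n & 2) { memcpy(to, from, 2); to += 2; from += 2; }
 *		if (n & 1) { *to = *from; }
 *	}
 */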
extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
					   unsigned long n);

static inline unsigned long
__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		__asm__ __volatile__(
		"	mov.f   lp_count, %0		\n"
		"	lpnz 3f				\n"
		"	ldb.ab  %1, [%3, 1]		\n"
		"1:	stb.ab  %1, [%2, 1]		\n"
		"	sub     %0, %0, 1		\n"
		"3:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   3b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.previous			\n"

		: "+r" (n),
		/*
		 * Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "lp_start", "lp_end", "memory");

		return n;
	}

	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr lp_count, %7,4		\n"
			"	lp  3f				\n"
			"	ld.ab %3, [%2, 4]		\n"
			"	ld.ab %4, [%2, 4]		\n"
			"	ld.ab %5, [%2, 4]		\n"
			"	ld.ab %6, [%2, 4]		\n"
			"1:	st.ab %3, [%1, 4]		\n"
			"11:	st.ab %4, [%1, 4]		\n"
			"12:	st.ab %5, [%1, 4]		\n"
			"13:	st.ab %6, [%1, 4]		\n"
			"	sub   %0, %0, 16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"	ld.ab   %4, [%2,4]		\n"
			"14:	st.ab   %3, [%1,4]		\n"
			"15:	st.ab   %4, [%1,4]		\n"
			"	sub     %0, %0, 8		\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"16:	st.ab   %3, [%1,4]		\n"
			"	sub     %0, %0, 4		\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"	ldw.ab    %3, [%2,2]		\n"
			"17:	stw.ab    %3, [%1,2]		\n"
			"	sub       %0, %0, 2		\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"	ldb.ab  %3, [%2,1]		\n"
			"18:	stb.ab  %3, [%1,1]		\n"
			"	sub     %0, %0, 1		\n"
			"34:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */

		__asm__ __volatile__(
		"	mov   %0,%3			\n"
		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz  3f			\n"
		"	ld.ab %5, [%2, 4]		\n"
		"	ld.ab %6, [%2, 4]		\n"
		"	ld.ab %7, [%2, 4]		\n"
		"	ld.ab %8, [%2, 4]		\n"
		"1:	st.ab %5, [%1, 4]		\n"
		"11:	st.ab %6, [%1, 4]		\n"
		"12:	st.ab %7, [%1, 4]		\n"
		"13:	st.ab %8, [%1, 4]		\n"
		"	sub   %0, %0, 16		\n"
		"3:	and.f %3,%3,0xf			\n"  /* stragglers */
		"	bz 34f				\n"
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"	ld.ab   %6, [%2,4]		\n"
		"14:	st.ab   %5, [%1,4]		\n"
		"15:	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0, %0, 8		\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"16:	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0, %0, 4		\n"
		"32:	bbit0 %3,1,33f			\n"  /* 2 bytes left */
		"	ldw.ab    %5, [%2,2]		\n"
		"17:	stw.ab    %5, [%1,2]		\n"
		"	sub.f %0, %0, 2			\n"
		"33:	bbit0 %3,0,34f			\n"
		"	ldb.ab    %5, [%2,1]		\n"  /* 1 byte left */
		"18:	stb.ab  %5, [%1,1]		\n"
		"	sub.f %0, %0, 1			\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}
static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
{
	long res = n;
	unsigned char *d_char = to;

	__asm__ __volatile__(
	"	bbit0   %0, 0, 1f		\n"
	"75:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"1:	bbit0   %0, 1, 2f		\n"
	"76:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"2:	asr.f   lp_count, %1, 2		\n"
	"	lpnz    3f			\n"
	"77:	st.ab   %2, [%0,4]		\n"
	"	sub %1, %1, 4			\n"
	"3:	bbit0   %1, 1, 4f		\n"
	"78:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"4:	bbit0   %1, 0, 5f		\n"
	"79:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"5:					\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"3:	j   5b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   75b, 3b			\n"
	"	.word   76b, 3b			\n"
	"	.word   77b, 3b			\n"
	"	.word   78b, 3b			\n"
	"	.word   79b, 3b			\n"
	"	.previous			\n"
	: "+r"(d_char), "+r"(res)
	: "i"(0)
	: "lp_count", "lp_start", "lp_end", "memory");

	return res;
}
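/*
 * Illustration only (never compiled): the word-clear pattern the asm above
 * implements, per the June 2010 note at the top of this file - clear words
 * in the zero-overhead loop, then mop up the trailing halfword/byte. The
 * sketch assumes @to is already word-aligned; the asm handles the leading
 * misalignment with byte/halfword stores. The real return value is the
 * number of bytes that could NOT be cleared (0 on full success).
 *
 *	static inline void example_word_clear(u8 *to, unsigned long n)
 *	{
 *		while (n >= 4) { *(u32 *)to = 0; to += 4; n -= 4; }
 *		if (n & 2)     { *(u16 *)to = 0; to += 2; }
 *		if (n & 1)     { *to = 0; }
 *	}
 */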
static inline long
__arc_strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = count;
	char val;
	unsigned int hw_count;

	if (count == 0)
		return 0;

	__asm__ __volatile__(
	"	lp 2f				\n"
	"1:	ldb.ab  %3, [%2, 1]		\n"
	"	breq.d  %3, 0, 2f		\n"
	"	stb.ab  %3, [%1, 1]		\n"
	"2:	sub %0, %6, %4			\n"
	"3:	;nop				\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, %5			\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   1b, 4b			\n"
	"	.previous			\n"
	: "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
	: "g"(-EFAULT), "ir"(count), "4"(count)	/* this "4" seeds lp_count */
	: "memory");

	return res;
}

static inline long __arc_strnlen_user(const char __user *s, long n)
{
	long res, tmp1, cnt;
	char val;

	__asm__ __volatile__(
	"	mov %2, %1			\n"
	"1:	ldb.ab  %3, [%0, 1]		\n"
	"	breq.d  %3, 0, 2f		\n"
	"	sub.f   %2, %2, 1		\n"
	"	bnz 1b				\n"
	"	sub %2, %2, 1			\n"
	"2:	sub %0, %1, %2			\n"
	"3:	;nop				\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, 0			\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word 1b, 4b			\n"
	"	.previous			\n"
	: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
	: "0"(s), "1"(n)
	: "memory");

	return res;
}
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
#define __copy_to_user(t, f, n)		__arc_copy_to_user(t, f, n)
#define __clear_user(d, n)		__arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
#else
extern long arc_copy_from_user_noinline(void *to, const void __user *from,
					unsigned long n);
extern long arc_copy_to_user_noinline(void __user *to, const void *from,
				      unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to,
					     unsigned long n);
extern long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
					   long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);

#define __copy_from_user(t, f, n)	arc_copy_from_user_noinline(t, f, n)
#define __copy_to_user(t, f, n)		arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)

#endif

#include <asm-generic/uaccess.h>

extern int fixup_exception(struct pt_regs *regs);

#endif