1/* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Quick'n'dirty IP checksum ... 7 * 8 * Copyright (C) 1998, 1999 Ralf Baechle 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Copyright (C) 2007 Maciej W. Rozycki 11 * Copyright (C) 2014 Imagination Technologies Ltd. 12 */ 13#include <linux/errno.h> 14#include <asm/asm.h> 15#include <asm/asm-offsets.h> 16#include <asm/regdef.h> 17 18#ifdef CONFIG_64BIT 19/* 20 * As we are sharing code base with the mips32 tree (which use the o32 ABI 21 * register definitions). We need to redefine the register definitions from 22 * the n64 ABI register naming to the o32 ABI register naming. 23 */ 24#undef t0 25#undef t1 26#undef t2 27#undef t3 28#define t0 $8 29#define t1 $9 30#define t2 $10 31#define t3 $11 32#define t4 $12 33#define t5 $13 34#define t6 $14 35#define t7 $15 36 37#define USE_DOUBLE 38#endif 39 40#ifdef USE_DOUBLE 41 42#define LOAD ld 43#define LOAD32 lwu 44#define ADD daddu 45#define NBYTES 8 46 47#else 48 49#define LOAD lw 50#define LOAD32 lw 51#define ADD addu 52#define NBYTES 4 53 54#endif /* USE_DOUBLE */ 55 56#define UNIT(unit) ((unit)*NBYTES) 57 58#define ADDC(sum,reg) \ 59 .set push; \ 60 .set noat; \ 61 ADD sum, reg; \ 62 sltu v1, sum, reg; \ 63 ADD sum, v1; \ 64 .set pop 65 66#define ADDC32(sum,reg) \ 67 .set push; \ 68 .set noat; \ 69 addu sum, reg; \ 70 sltu v1, sum, reg; \ 71 addu sum, v1; \ 72 .set pop 73 74#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ 75 LOAD _t0, (offset + UNIT(0))(src); \ 76 LOAD _t1, (offset + UNIT(1))(src); \ 77 LOAD _t2, (offset + UNIT(2))(src); \ 78 LOAD _t3, (offset + UNIT(3))(src); \ 79 ADDC(sum, _t0); \ 80 ADDC(sum, _t1); \ 81 ADDC(sum, _t2); \ 82 ADDC(sum, _t3) 83 84#ifdef USE_DOUBLE 85#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ 86 CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) 87#else 88#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \ 89 CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3); \ 90 CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3) 91#endif 92 93/* 94 * a0: source address 95 * a1: length of the area to checksum 96 * a2: partial checksum 97 */ 98 99#define src a0 100#define sum v0 101 102 .text 103 .set noreorder 104 .align 5 105LEAF(csum_partial) 106 move sum, zero 107 move t7, zero 108 109 sltiu t8, a1, 0x8 110 bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */ 111 move t2, a1 112 113 andi t7, src, 0x1 /* odd buffer? */ 114 115.Lhword_align: 116 beqz t7, .Lword_align 117 andi t8, src, 0x2 118 119 lbu t0, (src) 120 LONG_SUBU a1, a1, 0x1 121#ifdef __MIPSEL__ 122 sll t0, t0, 8 123#endif 124 ADDC(sum, t0) 125 PTR_ADDU src, src, 0x1 126 andi t8, src, 0x2 127 128.Lword_align: 129 beqz t8, .Ldword_align 130 sltiu t8, a1, 56 131 132 lhu t0, (src) 133 LONG_SUBU a1, a1, 0x2 134 ADDC(sum, t0) 135 sltiu t8, a1, 56 136 PTR_ADDU src, src, 0x2 137 138.Ldword_align: 139 bnez t8, .Ldo_end_words 140 move t8, a1 141 142 andi t8, src, 0x4 143 beqz t8, .Lqword_align 144 andi t8, src, 0x8 145 146 LOAD32 t0, 0x00(src) 147 LONG_SUBU a1, a1, 0x4 148 ADDC(sum, t0) 149 PTR_ADDU src, src, 0x4 150 andi t8, src, 0x8 151 152.Lqword_align: 153 beqz t8, .Loword_align 154 andi t8, src, 0x10 155 156#ifdef USE_DOUBLE 157 ld t0, 0x00(src) 158 LONG_SUBU a1, a1, 0x8 159 ADDC(sum, t0) 160#else 161 lw t0, 0x00(src) 162 lw t1, 0x04(src) 163 LONG_SUBU a1, a1, 0x8 164 ADDC(sum, t0) 165 ADDC(sum, t1) 166#endif 167 PTR_ADDU src, src, 0x8 168 andi t8, src, 0x10 169 170.Loword_align: 171 beqz t8, .Lbegin_movement 172 LONG_SRL t8, a1, 0x7 173 174#ifdef USE_DOUBLE 175 ld t0, 0x00(src) 176 ld t1, 0x08(src) 177 ADDC(sum, t0) 178 ADDC(sum, t1) 179#else 180 CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4) 181#endif 182 LONG_SUBU a1, a1, 0x10 183 PTR_ADDU src, src, 0x10 184 LONG_SRL t8, a1, 0x7 185 186.Lbegin_movement: 187 beqz t8, 1f 188 andi t2, a1, 0x40 189 190.Lmove_128bytes: 191 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) 192 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) 193 CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4) 194 CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4) 195 LONG_SUBU t8, t8, 0x01 196 .set reorder /* DADDI_WAR */ 197 PTR_ADDU src, src, 0x80 198 bnez t8, .Lmove_128bytes 199 .set noreorder 200 2011: 202 beqz t2, 1f 203 andi t2, a1, 0x20 204 205.Lmove_64bytes: 206 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) 207 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) 208 PTR_ADDU src, src, 0x40 209 2101: 211 beqz t2, .Ldo_end_words 212 andi t8, a1, 0x1c 213 214.Lmove_32bytes: 215 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) 216 andi t8, a1, 0x1c 217 PTR_ADDU src, src, 0x20 218 219.Ldo_end_words: 220 beqz t8, .Lsmall_csumcpy 221 andi t2, a1, 0x3 222 LONG_SRL t8, t8, 0x2 223 224.Lend_words: 225 LOAD32 t0, (src) 226 LONG_SUBU t8, t8, 0x1 227 ADDC(sum, t0) 228 .set reorder /* DADDI_WAR */ 229 PTR_ADDU src, src, 0x4 230 bnez t8, .Lend_words 231 .set noreorder 232 233/* unknown src alignment and < 8 bytes to go */ 234.Lsmall_csumcpy: 235 move a1, t2 236 237 andi t0, a1, 4 238 beqz t0, 1f 239 andi t0, a1, 2 240 241 /* Still a full word to go */ 242 ulw t1, (src) 243 PTR_ADDIU src, 4 244#ifdef USE_DOUBLE 245 dsll t1, t1, 32 /* clear lower 32bit */ 246#endif 247 ADDC(sum, t1) 248 2491: move t1, zero 250 beqz t0, 1f 251 andi t0, a1, 1 252 253 /* Still a halfword to go */ 254 ulhu t1, (src) 255 PTR_ADDIU src, 2 256 2571: beqz t0, 1f 258 sll t1, t1, 16 259 260 lbu t2, (src) 261 nop 262 263#ifdef __MIPSEB__ 264 sll t2, t2, 8 265#endif 266 or t1, t2 267 2681: ADDC(sum, t1) 269 270 /* fold checksum */ 271#ifdef USE_DOUBLE 272 dsll32 v1, sum, 0 273 daddu sum, v1 274 sltu v1, sum, v1 275 dsra32 sum, sum, 0 276 addu sum, v1 277#endif 278 279 /* odd buffer alignment? */ 280#ifdef CONFIG_CPU_MIPSR2 281 wsbh v1, sum 282 movn sum, v1, t7 283#else 284 beqz t7, 1f /* odd buffer alignment? */ 285 lui v1, 0x00ff 286 addu v1, 0x00ff 287 and t0, sum, v1 288 sll t0, t0, 8 289 srl sum, sum, 8 290 and sum, sum, v1 291 or sum, sum, t0 2921: 293#endif 294 .set reorder 295 /* Add the passed partial csum. */ 296 ADDC32(sum, a2) 297 jr ra 298 .set noreorder 299 END(csum_partial) 300 301 302/* 303 * checksum and copy routines based on memcpy.S 304 * 305 * csum_partial_copy_nocheck(src, dst, len, sum) 306 * __csum_partial_copy_kernel(src, dst, len, sum, errp) 307 * 308 * See "Spec" in memcpy.S for details. Unlike __copy_user, all 309 * function in this file use the standard calling convention. 310 */ 311 312#define src a0 313#define dst a1 314#define len a2 315#define psum a3 316#define sum v0 317#define odd t8 318#define errptr t9 319 320/* 321 * The exception handler for loads requires that: 322 * 1- AT contain the address of the byte just past the end of the source 323 * of the copy, 324 * 2- src_entry <= src < AT, and 325 * 3- (dst - src) == (dst_entry - src_entry), 326 * The _entry suffix denotes values when __copy_user was called. 327 * 328 * (1) is set up up by __csum_partial_copy_from_user and maintained by 329 * not writing AT in __csum_partial_copy 330 * (2) is met by incrementing src by the number of bytes copied 331 * (3) is met by not doing loads between a pair of increments of dst and src 332 * 333 * The exception handlers for stores stores -EFAULT to errptr and return. 334 * These handlers do not need to overwrite any data. 335 */ 336 337/* Instruction type */ 338#define LD_INSN 1 339#define ST_INSN 2 340#define LEGACY_MODE 1 341#define EVA_MODE 2 342#define USEROP 1 343#define KERNELOP 2 344 345/* 346 * Wrapper to add an entry in the exception table 347 * in case the insn causes a memory exception. 348 * Arguments: 349 * insn : Load/store instruction 350 * type : Instruction type 351 * reg : Register 352 * addr : Address 353 * handler : Exception handler 354 */ 355#define EXC(insn, type, reg, addr, handler) \ 356 .if \mode == LEGACY_MODE; \ 3579: insn reg, addr; \ 358 .section __ex_table,"a"; \ 359 PTR 9b, handler; \ 360 .previous; \ 361 /* This is enabled in EVA mode */ \ 362 .else; \ 363 /* If loading from user or storing to user */ \ 364 .if ((\from == USEROP) && (type == LD_INSN)) || \ 365 ((\to == USEROP) && (type == ST_INSN)); \ 3669: __BUILD_EVA_INSN(insn##e, reg, addr); \ 367 .section __ex_table,"a"; \ 368 PTR 9b, handler; \ 369 .previous; \ 370 .else; \ 371 /* EVA without exception */ \ 372 insn reg, addr; \ 373 .endif; \ 374 .endif 375 376#undef LOAD 377 378#ifdef USE_DOUBLE 379 380#define LOADK ld /* No exception */ 381#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler) 382#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) 383#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler) 384#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler) 385#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) 386#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler) 387#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler) 388#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler) 389#define ADD daddu 390#define SUB dsubu 391#define SRL dsrl 392#define SLL dsll 393#define SLLV dsllv 394#define SRLV dsrlv 395#define NBYTES 8 396#define LOG_NBYTES 3 397 398#else 399 400#define LOADK lw /* No exception */ 401#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler) 402#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler) 403#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler) 404#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler) 405#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) 406#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler) 407#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler) 408#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler) 409#define ADD addu 410#define SUB subu 411#define SRL srl 412#define SLL sll 413#define SLLV sllv 414#define SRLV srlv 415#define NBYTES 4 416#define LOG_NBYTES 2 417 418#endif /* USE_DOUBLE */ 419 420#ifdef CONFIG_CPU_LITTLE_ENDIAN 421#define LDFIRST LOADR 422#define LDREST LOADL 423#define STFIRST STORER 424#define STREST STOREL 425#define SHIFT_DISCARD SLLV 426#define SHIFT_DISCARD_REVERT SRLV 427#else 428#define LDFIRST LOADL 429#define LDREST LOADR 430#define STFIRST STOREL 431#define STREST STORER 432#define SHIFT_DISCARD SRLV 433#define SHIFT_DISCARD_REVERT SLLV 434#endif 435 436#define FIRST(unit) ((unit)*NBYTES) 437#define REST(unit) (FIRST(unit)+NBYTES-1) 438 439#define ADDRMASK (NBYTES-1) 440 441#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 442 .set noat 443#else 444 .set at=v1 445#endif 446 447 .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck 448 449 PTR_ADDU AT, src, len /* See (1) above. */ 450 /* initialize __nocheck if this the first time we execute this 451 * macro 452 */ 453#ifdef CONFIG_64BIT 454 move errptr, a4 455#else 456 lw errptr, 16(sp) 457#endif 458 .if \__nocheck == 1 459 FEXPORT(csum_partial_copy_nocheck) 460 .endif 461 move sum, zero 462 move odd, zero 463 /* 464 * Note: dst & src may be unaligned, len may be 0 465 * Temps 466 */ 467 /* 468 * The "issue break"s below are very approximate. 469 * Issue delays for dcache fills will perturb the schedule, as will 470 * load queue full replay traps, etc. 471 * 472 * If len < NBYTES use byte operations. 473 */ 474 sltu t2, len, NBYTES 475 and t1, dst, ADDRMASK 476 bnez t2, .Lcopy_bytes_checklen\@ 477 and t0, src, ADDRMASK 478 andi odd, dst, 0x1 /* odd buffer? */ 479 bnez t1, .Ldst_unaligned\@ 480 nop 481 bnez t0, .Lsrc_unaligned_dst_aligned\@ 482 /* 483 * use delay slot for fall-through 484 * src and dst are aligned; need to compute rem 485 */ 486.Lboth_aligned\@: 487 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter 488 beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES 489 nop 490 SUB len, 8*NBYTES # subtract here for bgez loop 491 .align 4 4921: 493 LOAD(t0, UNIT(0)(src), .Ll_exc\@) 494 LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) 495 LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) 496 LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) 497 LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@) 498 LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@) 499 LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@) 500 LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@) 501 SUB len, len, 8*NBYTES 502 ADD src, src, 8*NBYTES 503 STORE(t0, UNIT(0)(dst), .Ls_exc\@) 504 ADDC(sum, t0) 505 STORE(t1, UNIT(1)(dst), .Ls_exc\@) 506 ADDC(sum, t1) 507 STORE(t2, UNIT(2)(dst), .Ls_exc\@) 508 ADDC(sum, t2) 509 STORE(t3, UNIT(3)(dst), .Ls_exc\@) 510 ADDC(sum, t3) 511 STORE(t4, UNIT(4)(dst), .Ls_exc\@) 512 ADDC(sum, t4) 513 STORE(t5, UNIT(5)(dst), .Ls_exc\@) 514 ADDC(sum, t5) 515 STORE(t6, UNIT(6)(dst), .Ls_exc\@) 516 ADDC(sum, t6) 517 STORE(t7, UNIT(7)(dst), .Ls_exc\@) 518 ADDC(sum, t7) 519 .set reorder /* DADDI_WAR */ 520 ADD dst, dst, 8*NBYTES 521 bgez len, 1b 522 .set noreorder 523 ADD len, 8*NBYTES # revert len (see above) 524 525 /* 526 * len == the number of bytes left to copy < 8*NBYTES 527 */ 528.Lcleanup_both_aligned\@: 529#define rem t7 530 beqz len, .Ldone\@ 531 sltu t0, len, 4*NBYTES 532 bnez t0, .Lless_than_4units\@ 533 and rem, len, (NBYTES-1) # rem = len % NBYTES 534 /* 535 * len >= 4*NBYTES 536 */ 537 LOAD(t0, UNIT(0)(src), .Ll_exc\@) 538 LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@) 539 LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@) 540 LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@) 541 SUB len, len, 4*NBYTES 542 ADD src, src, 4*NBYTES 543 STORE(t0, UNIT(0)(dst), .Ls_exc\@) 544 ADDC(sum, t0) 545 STORE(t1, UNIT(1)(dst), .Ls_exc\@) 546 ADDC(sum, t1) 547 STORE(t2, UNIT(2)(dst), .Ls_exc\@) 548 ADDC(sum, t2) 549 STORE(t3, UNIT(3)(dst), .Ls_exc\@) 550 ADDC(sum, t3) 551 .set reorder /* DADDI_WAR */ 552 ADD dst, dst, 4*NBYTES 553 beqz len, .Ldone\@ 554 .set noreorder 555.Lless_than_4units\@: 556 /* 557 * rem = len % NBYTES 558 */ 559 beq rem, len, .Lcopy_bytes\@ 560 nop 5611: 562 LOAD(t0, 0(src), .Ll_exc\@) 563 ADD src, src, NBYTES 564 SUB len, len, NBYTES 565 STORE(t0, 0(dst), .Ls_exc\@) 566 ADDC(sum, t0) 567 .set reorder /* DADDI_WAR */ 568 ADD dst, dst, NBYTES 569 bne rem, len, 1b 570 .set noreorder 571 572 /* 573 * src and dst are aligned, need to copy rem bytes (rem < NBYTES) 574 * A loop would do only a byte at a time with possible branch 575 * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE 576 * because can't assume read-access to dst. Instead, use 577 * STREST dst, which doesn't require read access to dst. 578 * 579 * This code should perform better than a simple loop on modern, 580 * wide-issue mips processors because the code has fewer branches and 581 * more instruction-level parallelism. 582 */ 583#define bits t2 584 beqz len, .Ldone\@ 585 ADD t1, dst, len # t1 is just past last byte of dst 586 li bits, 8*NBYTES 587 SLL rem, len, 3 # rem = number of bits to keep 588 LOAD(t0, 0(src), .Ll_exc\@) 589 SUB bits, bits, rem # bits = number of bits to discard 590 SHIFT_DISCARD t0, t0, bits 591 STREST(t0, -1(t1), .Ls_exc\@) 592 SHIFT_DISCARD_REVERT t0, t0, bits 593 .set reorder 594 ADDC(sum, t0) 595 b .Ldone\@ 596 .set noreorder 597.Ldst_unaligned\@: 598 /* 599 * dst is unaligned 600 * t0 = src & ADDRMASK 601 * t1 = dst & ADDRMASK; T1 > 0 602 * len >= NBYTES 603 * 604 * Copy enough bytes to align dst 605 * Set match = (src and dst have same alignment) 606 */ 607#define match rem 608 LDFIRST(t3, FIRST(0)(src), .Ll_exc\@) 609 ADD t2, zero, NBYTES 610 LDREST(t3, REST(0)(src), .Ll_exc_copy\@) 611 SUB t2, t2, t1 # t2 = number of bytes copied 612 xor match, t0, t1 613 STFIRST(t3, FIRST(0)(dst), .Ls_exc\@) 614 SLL t4, t1, 3 # t4 = number of bits to discard 615 SHIFT_DISCARD t3, t3, t4 616 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ 617 ADDC(sum, t3) 618 beq len, t2, .Ldone\@ 619 SUB len, len, t2 620 ADD dst, dst, t2 621 beqz match, .Lboth_aligned\@ 622 ADD src, src, t2 623 624.Lsrc_unaligned_dst_aligned\@: 625 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter 626 beqz t0, .Lcleanup_src_unaligned\@ 627 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 6281: 629/* 630 * Avoid consecutive LD*'s to the same register since some mips 631 * implementations can't issue them in the same cycle. 632 * It's OK to load FIRST(N+1) before REST(N) because the two addresses 633 * are to the same unit (unless src is aligned, but it's not). 634 */ 635 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) 636 LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@) 637 SUB len, len, 4*NBYTES 638 LDREST(t0, REST(0)(src), .Ll_exc_copy\@) 639 LDREST(t1, REST(1)(src), .Ll_exc_copy\@) 640 LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@) 641 LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@) 642 LDREST(t2, REST(2)(src), .Ll_exc_copy\@) 643 LDREST(t3, REST(3)(src), .Ll_exc_copy\@) 644 ADD src, src, 4*NBYTES 645#ifdef CONFIG_CPU_SB1 646 nop # improves slotting 647#endif 648 STORE(t0, UNIT(0)(dst), .Ls_exc\@) 649 ADDC(sum, t0) 650 STORE(t1, UNIT(1)(dst), .Ls_exc\@) 651 ADDC(sum, t1) 652 STORE(t2, UNIT(2)(dst), .Ls_exc\@) 653 ADDC(sum, t2) 654 STORE(t3, UNIT(3)(dst), .Ls_exc\@) 655 ADDC(sum, t3) 656 .set reorder /* DADDI_WAR */ 657 ADD dst, dst, 4*NBYTES 658 bne len, rem, 1b 659 .set noreorder 660 661.Lcleanup_src_unaligned\@: 662 beqz len, .Ldone\@ 663 and rem, len, NBYTES-1 # rem = len % NBYTES 664 beq rem, len, .Lcopy_bytes\@ 665 nop 6661: 667 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@) 668 LDREST(t0, REST(0)(src), .Ll_exc_copy\@) 669 ADD src, src, NBYTES 670 SUB len, len, NBYTES 671 STORE(t0, 0(dst), .Ls_exc\@) 672 ADDC(sum, t0) 673 .set reorder /* DADDI_WAR */ 674 ADD dst, dst, NBYTES 675 bne len, rem, 1b 676 .set noreorder 677 678.Lcopy_bytes_checklen\@: 679 beqz len, .Ldone\@ 680 nop 681.Lcopy_bytes\@: 682 /* 0 < len < NBYTES */ 683#ifdef CONFIG_CPU_LITTLE_ENDIAN 684#define SHIFT_START 0 685#define SHIFT_INC 8 686#else 687#define SHIFT_START 8*(NBYTES-1) 688#define SHIFT_INC -8 689#endif 690 move t2, zero # partial word 691 li t3, SHIFT_START # shift 692/* use .Ll_exc_copy here to return correct sum on fault */ 693#define COPY_BYTE(N) \ 694 LOADBU(t0, N(src), .Ll_exc_copy\@); \ 695 SUB len, len, 1; \ 696 STOREB(t0, N(dst), .Ls_exc\@); \ 697 SLLV t0, t0, t3; \ 698 addu t3, SHIFT_INC; \ 699 beqz len, .Lcopy_bytes_done\@; \ 700 or t2, t0 701 702 COPY_BYTE(0) 703 COPY_BYTE(1) 704#ifdef USE_DOUBLE 705 COPY_BYTE(2) 706 COPY_BYTE(3) 707 COPY_BYTE(4) 708 COPY_BYTE(5) 709#endif 710 LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@) 711 SUB len, len, 1 712 STOREB(t0, NBYTES-2(dst), .Ls_exc\@) 713 SLLV t0, t0, t3 714 or t2, t0 715.Lcopy_bytes_done\@: 716 ADDC(sum, t2) 717.Ldone\@: 718 /* fold checksum */ 719 .set push 720 .set noat 721#ifdef USE_DOUBLE 722 dsll32 v1, sum, 0 723 daddu sum, v1 724 sltu v1, sum, v1 725 dsra32 sum, sum, 0 726 addu sum, v1 727#endif 728 729#ifdef CONFIG_CPU_MIPSR2 730 wsbh v1, sum 731 movn sum, v1, odd 732#else 733 beqz odd, 1f /* odd buffer alignment? */ 734 lui v1, 0x00ff 735 addu v1, 0x00ff 736 and t0, sum, v1 737 sll t0, t0, 8 738 srl sum, sum, 8 739 and sum, sum, v1 740 or sum, sum, t0 7411: 742#endif 743 .set pop 744 .set reorder 745 ADDC32(sum, psum) 746 jr ra 747 .set noreorder 748 749.Ll_exc_copy\@: 750 /* 751 * Copy bytes from src until faulting load address (or until a 752 * lb faults) 753 * 754 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) 755 * may be more than a byte beyond the last address. 756 * Hence, the lb below may get an exception. 757 * 758 * Assumes src < THREAD_BUADDR($28) 759 */ 760 LOADK t0, TI_TASK($28) 761 li t2, SHIFT_START 762 LOADK t0, THREAD_BUADDR(t0) 7631: 764 LOADBU(t1, 0(src), .Ll_exc\@) 765 ADD src, src, 1 766 sb t1, 0(dst) # can't fault -- we're copy_from_user 767 SLLV t1, t1, t2 768 addu t2, SHIFT_INC 769 ADDC(sum, t1) 770 .set reorder /* DADDI_WAR */ 771 ADD dst, dst, 1 772 bne src, t0, 1b 773 .set noreorder 774.Ll_exc\@: 775 LOADK t0, TI_TASK($28) 776 nop 777 LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address 778 nop 779 SUB len, AT, t0 # len number of uncopied bytes 780 /* 781 * Here's where we rely on src and dst being incremented in tandem, 782 * See (3) above. 783 * dst += (fault addr - src) to put dst at first byte to clear 784 */ 785 ADD dst, t0 # compute start address in a1 786 SUB dst, src 787 /* 788 * Clear len bytes starting at dst. Can't call __bzero because it 789 * might modify len. An inefficient loop for these rare times... 790 */ 791 .set reorder /* DADDI_WAR */ 792 SUB src, len, 1 793 beqz len, .Ldone\@ 794 .set noreorder 7951: sb zero, 0(dst) 796 ADD dst, dst, 1 797 .set push 798 .set noat 799#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 800 bnez src, 1b 801 SUB src, src, 1 802#else 803 li v1, 1 804 bnez src, 1b 805 SUB src, src, v1 806#endif 807 li v1, -EFAULT 808 b .Ldone\@ 809 sw v1, (errptr) 810 811.Ls_exc\@: 812 li v0, -1 /* invalid checksum */ 813 li v1, -EFAULT 814 jr ra 815 sw v1, (errptr) 816 .set pop 817 .endm 818 819LEAF(__csum_partial_copy_kernel) 820#ifndef CONFIG_EVA 821FEXPORT(__csum_partial_copy_to_user) 822FEXPORT(__csum_partial_copy_from_user) 823#endif 824__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1 825END(__csum_partial_copy_kernel) 826 827#ifdef CONFIG_EVA 828LEAF(__csum_partial_copy_to_user) 829__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0 830END(__csum_partial_copy_to_user) 831 832LEAF(__csum_partial_copy_from_user) 833__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0 834END(__csum_partial_copy_from_user) 835#endif 836