/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * NOTE: this code runs in 32-bit mode and is packaged as ELF32.
 */

#include "ppc_asm.h"

	.text

/* strcpy(dest, src): r3 = dest, r4 = src; copies up to and including the
 * terminating NUL, returns dest (r3 is never modified). */
	.globl	strcpy
strcpy:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

/* strncpy(dest, src, n): r3 = dest, r4 = src, r5 = n; copies at most n
 * bytes, stopping once a NUL has been copied (no zero padding of the
 * remainder); returns dest in r3. */
	.globl	strncpy
strncpy:
	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	blr

/* strcat(dest, src): r3 = dest, r4 = src; appends src (including its NUL)
 * to the end of dest, returns dest in r3. */
	.globl	strcat
strcat:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)	/* find the NUL at the end of dest */
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1	/* back up so the NUL gets overwritten */
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

/* strcmp(s1, s2): r3 = s1, r4 = s2; returns in r3 the difference of the
 * first mismatching bytes, or 0 if the strings are equal. */
	.globl	strcmp
strcmp:
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1		/* end of s1 reached */
	beq	1b		/* bytes equal, keep going */
	blr

/* strlen(s): r3 = s; returns the length (excluding the NUL) in r3. */
	.globl	strlen
strlen:
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4	/* r4 points at the NUL, so r4 - r3 = length */
	blr

/* memset(s, c, n): r3 = dest, r4 = fill byte, r5 = length; word-fills the
 * aligned middle of the buffer, returns dest in r3. */
	.globl	memset
memset:
	rlwimi	r4,r4,8,16,23	/* replicate the low byte of r4 ... */
	rlwimi	r4,r4,16,0,15	/* ... into all four bytes of the word */
	addi	r6,r3,-4
	cmplwi	0,r5,4
	blt	7f
	stwu	r4,4(r6)
	beqlr
	andi.	r0,r6,3
	add	r5,r0,r5
	subf	r6,r0,r6
	rlwinm	r0,r5,32-2,2,31	/* r0 = r5 >> 2 = word count */
	mtctr	r0
	bdz	6f
1:	stwu	r4,4(r6)
	bdnz	1b
6:	andi.	r5,r5,3
7:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r6,3
8:	stbu	r4,1(r6)	/* trailing (or short) byte fill */
	bdnz	8b
	blr

/* memmove(dest, src, n): like memcpy but safe for overlapping regions;
 * copies backwards when dest > src. */
	.globl	memmove
memmove:
	cmplw	0,r3,r4
	bgt	backwards_memcpy
	/* fall through */

/* memcpy(dest, src, n): r3 = dest, r4 = src, r5 = length; copies two words
 * per iteration once both pointers are word-aligned, returns dest in r3. */
	.globl	memcpy
memcpy:
	rlwinm.	r7,r5,32-3,3,31	/* r7 = r5 >> 3 */
	addi	r6,r3,-4
	addi	r4,r4,-4
	beq	3f		/* if less than 8 bytes to do */
	andi.	r0,r6,3		/* get dest word aligned */
	mtctr	r7
	bne	5f
	andi.	r0,r4,3		/* check src word aligned too */
	bne	3f
1:	lwz	r7,4(r4)	/* unrolled word-copy loop */
	lwzu	r8,8(r4)
	stw	r7,4(r6)
	stwu	r8,8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,4(r4)
	addi	r5,r5,-4
	stwu	r0,4(r6)
3:	cmpwi	0,r5,0		/* byte-at-a-time tail copy */
	beqlr
	mtctr	r5
	addi	r4,r4,3
	addi	r6,r6,3
4:	lbzu	r0,1(r4)
	stbu	r0,1(r6)
	bdnz	4b
	blr
5:	subfic	r0,r0,4		/* bytes needed to align the destination */
	cmpw	cr1,r0,r5
	add	r7,r0,r4
	andi.	r7,r7,3		/* will source be word-aligned too? */
	ble	cr1,3b
	bne	3b		/* do byte-by-byte if not */
	mtctr	r0
6:	lbz	r7,4(r4)	/* copy bytes until dest is aligned */
	addi	r4,r4,1
	stb	r7,4(r6)
	addi	r6,r6,1
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

/* backwards_memcpy(dest, src, n): same arguments as memcpy, but copies
 * from the end of the buffers downwards. */
	.globl	backwards_memcpy
backwards_memcpy:
	rlwinm.	r7,r5,32-3,3,31	/* r7 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	3f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
	andi.	r0,r4,3
	bne	3f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	cmpw	cr1,r0,r5	/* r0 = bytes needed to align the destination */
	subf	r7,r0,r4
	andi.	r7,r7,3
	ble	cr1,3b
	bne	3b
	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b

/* memcmp(s1, s2, n): r3 = s1, r4 = s2, r5 = length; returns in r3 the
 * difference of the first mismatching bytes, or 0 if the buffers match. */
	.globl	memcmp
memcmp:
	cmpwi	0,r5,0
	blelr			/* n <= 0: return with r3 unchanged */
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	bdnzt	2,1b		/* dec ctr, branch if ctr != 0 && cr0.eq */
	blr


/*
 * Flush the dcache and invalidate the icache for a range of addresses.
 *
 * flush_cache(addr, len)
 *	r3 = addr, r4 = len; assumes 32-byte cache lines.
 */
	.globl	flush_cache
flush_cache:
	addi	r4,r4,0x1f	/* len = (len + 0x1f) / 0x20 */
	rlwinm.	r4,r4,27,5,31
	mtctr	r4
	beqlr
1:	dcbf	0,r3		/* write the line back to memory ... */
	icbi	0,r3		/* ... and discard any stale icache copy */
	addi	r3,r3,0x20
	bdnz	1b
	sync
	isync
	blr
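
/*
 * For reference, these routines stand in for the usual C library functions
 * of the same names. The prototypes below are a sketch of the conventional
 * declarations only: the exact header used by the surrounding code may
 * differ, and the flush_cache signature in particular is an assumption
 * based solely on the comment above it.
 *
 *	char  *strcpy(char *dest, const char *src);
 *	char  *strncpy(char *dest, const char *src, size_t n);
 *	char  *strcat(char *dest, const char *src);
 *	int    strcmp(const char *s1, const char *s2);
 *	size_t strlen(const char *s);
 *	void  *memset(void *s, int c, size_t n);
 *	void  *memmove(void *dest, const void *src, size_t n);
 *	void  *memcpy(void *dest, const void *src, size_t n);
 *	int    memcmp(const void *s1, const void *s2, size_t n);
 *	void   flush_cache(void *addr, unsigned long len);
 */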