/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/lib/memset.S
 *
 * Copyright (C) 1995-2000 Russell King
 *
 * ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

	.text
	.align	5

ENTRY(__memset)
ENTRY(mmioset)
WEAK(memset)
UNWIND( .fnstart )
	and	r1, r1, #255		@ cast to unsigned char
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8	@ splat the byte across all
	orr	r1, r1, r1, lsl #16	@ four bytes of the word
	mov	r3, r1
7:	cmp	r2, #16			@ __memset64 enters here with the
	blt	4f			@ pattern already in r1/r3

/* CALGN() expands to its argument only on CPUs where cacheline-aligning
 * bulk writes is worthwhile (see asm/assembler.h); elsewhere it is empty
 * and this simple, non-aligning loop is used instead. */
#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
	mov	r8, r1
	mov	lr, r3

2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}
UNWIND( .fnend )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

	stmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
	mov	r4, r1			@ keep the low/high pattern words
	mov	r5, r3			@ alternating through r1, r3-r8, lr
	mov	r6, r1			@ so one 8-register stmia writes the
	mov	r7, r3			@ 64-bit pattern four times over
	mov	r8, r1
	mov	lr, r3

	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

	and	r8, ip, #31		@ bytes needed to reach the next
	rsb	r8, r8, #32		@ 32-byte boundary
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)	@ bit 4 -> C, bit 3 -> N, bit 2 -> bit 30
	stmiacs	ip!, {r4, r5, r6, r7}	@ C set: store 16 bytes
	stmiami	ip!, {r4, r5}		@ N set: store 8 bytes
	tst	r8, #(1 << 30)
	mov	r8, r1			@ restore r8 to the fill pattern
	strne	r1, [ip], #4		@ bit 30 set: store 4 bytes

3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}	@ Now <64 bytes to go.

	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmiane	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )

#endif

UNWIND( .fnstart )
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1
	ret	lr

6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
ENDPROC(__memset)

ENTRY(__memset32)
UNWIND( .fnstart )
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
UNWIND( .fnend )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart )
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
UNWIND( .fnend )
ENDPROC(__memset64)
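
/*
 * For reference, a sketch of the C-level view of the entry points above,
 * as wired up by the memset32()/memset64() wrappers in
 * arch/arm/include/asm/string.h (illustrative, not an authoritative
 * declaration):
 *
 *	void *memset(void *p, int c, __kernel_size_t n);
 *	void *__memset32(uint32_t *p, uint32_t v, __kernel_size_t n);
 *	void *__memset64(uint64_t *p, uint32_t low, __kernel_size_t n,
 *			 uint32_t hi);
 *
 * Under the AAPCS this places the destination in r0, the 32-bit fill
 * pattern (or the low word of the 64-bit one) in r1, the byte count in
 * r2 and the high word in r3 - exactly the registers the code above
 * expects when __memset32 falls through into __memset64, and __memset64
 * branches back to label 7 inside memset with the pattern already built.
 */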