/*
 * M7memset.S: SPARC M7 optimized memset.
 *
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * M7memset.S: M7 optimized memset.
 *
 * char *memset(sp, c, n)
 *
 * Set an array of n chars starting at sp to the character c.
 * Return sp.
 *
 * Fast assembler language version of the following C-program for memset
 * which represents the `standard' for the C-library.
 *
 *	void *
 *	memset(void *sp1, int c, size_t n)
 *	{
 *	    if (n != 0) {
 *		char *sp = sp1;
 *		do {
 *		    *sp++ = (char)c;
 *		} while (--n != 0);
 *	    }
 *	    return (sp1);
 *	}
 *
 * The algorithm is as follows :
 *
 *	For small 6 or fewer bytes stores, bytes will be stored.
 *
 *	For less than 32 bytes stores, align the address on 4 byte boundary.
 *	Then store as many 4-byte chunks, followed by trailing bytes.
 *
 *	For sizes greater than 32 bytes, align the address on 8 byte boundary.
 *	if (count >= 64) {
 *		store 8-bytes chunks to align the address on 64 byte boundary
 *		if (value to be set is zero && count >= MIN_ZERO) {
 *			Using BIS stores, set the first long word of each
 *			64-byte cache line to zero which will also clear the
 *			other seven long words of the cache line.
 *		}
 *		else if (count >= MIN_LOOP) {
 *			Using BIS stores, set the first long word of each of
 *			ST_CHUNK cache lines (64 bytes each) before the main
 *			loop is entered.
 *			In the main loop, continue pre-setting the first long
 *			word of each cache line ST_CHUNK lines in advance while
 *			setting the other seven long words (56 bytes) of each
 *			cache line until fewer than ST_CHUNK*64 bytes remain.
 *			Then set the remaining seven long words of each cache
 *			line that has already had its first long word set.
 *		}
 *		store remaining data in 64-byte chunks until less than
 *		64 bytes remain.
 *	}
 *	Store as many 8-byte chunks, followed by trailing bytes.
 *
 * BIS = Block Init Store
 *   Doing the advance store of the first element of the cache line
 *   initiates the displacement of a cache line while only using a single
 *   instruction in the pipeline. That avoids various pipeline delays,
 *   such as filling the miss buffer. The performance effect is
 *   similar to prefetching for normal stores.
 *   The special case for zero fills runs faster and uses fewer instruction
 *   cycles than the normal memset loop.
 *
 * We only use BIS for memset of greater than MIN_LOOP bytes because a
 * sequence of BIS stores must be followed by a membar #StoreStore. The
 * benefit of the BIS store must be balanced against the cost of the membar
 * operation.
 */

/*
 * ASI_STBI_P marks the cache line as "least recently used"
 * which means if many threads are active, it has a high chance
 * of being pushed out of the cache between the first initializing
 * store and the final stores.
 * Thus, we use ASI_STBIMRU_P which marks the cache line as
 * "most recently used" for all but the last store to the cache line.
 */

#include <asm/asi.h>
#include <asm/page.h>

#define	ASI_STBI_P	ASI_BLK_INIT_QUAD_LDD_P
#define	ASI_STBIMRU_P	ASI_ST_BLKINIT_MRU_P


#define	ST_CHUNK	24   /* multiple of 4 due to loop unrolling */
#define	MIN_LOOP	16320
#define	MIN_ZERO	512

	.section	".text"
	.align	32

/*
 * Define clear_page(dest) as memset(dest, 0, PAGE_SIZE)
 * (can create a more optimized version later.)
 */
	.globl		M7clear_page
	.globl		M7clear_user_page
M7clear_page:		/* clear_page(dest) */
M7clear_user_page:
	set	PAGE_SIZE, %o1
	/* fall through into bzero code */

	.size		M7clear_page,.-M7clear_page
	.size		M7clear_user_page,.-M7clear_user_page

/*
 * Define bzero(dest, n) as memset(dest, 0, n)
 * (can create a more optimized version later.)
 */
	.globl		M7bzero
M7bzero:		/* bzero(dest, size) */
	mov	%o1, %o2
	mov	0, %o1
	/* fall through into memset code */

	.size		M7bzero,.-M7bzero

	.global		M7memset
	.type		M7memset, #function
	.register	%g3, #scratch
M7memset:
	! Register usage throughout:
	!   %o0 - original dest, preserved untouched so it can be returned
	!   %o1 - fill byte, progressively replicated to 2/4/8 bytes of c
	!   %o2 - byte count
	!   %o5 - working dest pointer
	mov	%o0, %o5		! copy sp1 before using it
	cmp	%o2, 7			! if small counts, just write bytes
	bleu,pn	%xcc, .wrchar
	 and	%o1, 0xff, %o1		! o1 is (char)c

	sll	%o1, 8, %o3
	or	%o1, %o3, %o1		! now o1 has 2 bytes of c
	sll	%o1, 16, %o3
	cmp	%o2, 32
	blu,pn	%xcc, .wdalign
	 or	%o1, %o3, %o1		! now o1 has 4 bytes of c

	sllx	%o1, 32, %o3
	or	%o1, %o3, %o1		! now o1 has 8 bytes of c

.dbalign:
	andcc	%o5, 7, %o3		! is sp1 aligned on a 8 byte bound?
	bz,pt	%xcc, .blkalign		! already long word aligned
	 sub	%o3, 8, %o3		! -(bytes till long word aligned)

	add	%o2, %o3, %o2		! update o2 with new count
	! Set -(%o3) bytes till sp1 long word aligned
1:	stb	%o1, [%o5]		! there is at least 1 byte to set
	inccc	%o3			! byte clearing loop
	bl,pt	%xcc, 1b
	 inc	%o5

	! Now sp1 is long word aligned (sp1 is found in %o5)
.blkalign:
	cmp	%o2, 64		! check if there are 64 bytes to set
	blu,pn	%xcc, .wrshort
	 mov	%o2, %o3

	andcc	%o5, 63, %o3		! is sp1 block aligned?
	bz,pt	%xcc, .blkwr		! now block aligned
	 sub	%o3, 64, %o3		! o3 is -(bytes till block aligned)
	add	%o2, %o3, %o2		! o2 is the remainder

	! Store -(%o3) bytes till dst is block (64 byte) aligned.
	! Use long word stores.
	! Recall that dst is already long word aligned
1:
	addcc	%o3, 8, %o3
	stx	%o1, [%o5]
	bl,pt	%xcc, 1b
	 add	%o5, 8, %o5

	! Now sp1 is block aligned
.blkwr:
	andn	%o2, 63, %o4		! calculate size of blocks in bytes
	brz,pn	%o1, .wrzero		! special case if c == 0
	 and	%o2, 63, %o3		! %o3 = bytes left after blk stores.

	set	MIN_LOOP, %g1
	cmp	%o4, %g1		! check there are enough bytes to set
	blu,pn	%xcc, .short_set	! to justify cost of membar
					! must be > pre-cleared lines
	 nop

	! initial cache-clearing stores
	! get store pipeline moving
	rd	%asi, %g3		! save %asi to be restored later
	wr	%g0, ASI_STBIMRU_P, %asi

	! Primary memset loop for large memsets
.wr_loop:
	sub	%o5, 8, %o5		! adjust %o5 for ASI store alignment
	mov	ST_CHUNK, %g1
.wr_loop_start:
	! Pre-set the first long word of ST_CHUNK cache lines (4 lines,
	! i.e. 256 bytes, per iteration) to start the line displacement.
	stxa	%o1, [%o5+8]%asi
	subcc	%g1, 4, %g1
	stxa	%o1, [%o5+8+64]%asi
	add	%o5, 256, %o5
	stxa	%o1, [%o5+8-128]%asi
	bgu	%xcc, .wr_loop_start
	 stxa	%o1, [%o5+8-64]%asi

	sub	%o5, ST_CHUNK*64, %o5	! reset %o5
	mov	ST_CHUNK, %g1

.wr_loop_rest:
	! Fill in the remaining seven long words of each pre-set line;
	! the final store per line uses ASI_STBI_P (see header comment).
	stxa	%o1, [%o5+8+8]%asi
	sub	%o4, 64, %o4
	stxa	%o1, [%o5+16+8]%asi
	subcc	%g1, 1, %g1
	stxa	%o1, [%o5+24+8]%asi
	stxa	%o1, [%o5+32+8]%asi
	stxa	%o1, [%o5+40+8]%asi
	add	%o5, 64, %o5
	stxa	%o1, [%o5-8]%asi
	bgu	%xcc, .wr_loop_rest
	 stxa	%o1, [%o5]ASI_STBI_P

	! If more than ST_CHUNK*64 bytes remain to set, continue
	! setting the first long word of each cache line in advance
	! to keep the store pipeline moving.

	cmp	%o4, ST_CHUNK*64
	bge,pt	%xcc, .wr_loop_start
	 mov	ST_CHUNK, %g1

	brz,a,pn %o4, .asi_done
	 add	%o5, 8, %o5		! restore %o5 offset

.wr_loop_small:
	! Tail: fewer than ST_CHUNK*64 bytes; set whole lines one at a time.
	stxa	%o1, [%o5+8]%asi
	stxa	%o1, [%o5+8+8]%asi
	stxa	%o1, [%o5+16+8]%asi
	stxa	%o1, [%o5+24+8]%asi
	stxa	%o1, [%o5+32+8]%asi
	subcc	%o4, 64, %o4
	stxa	%o1, [%o5+40+8]%asi
	add	%o5, 64, %o5
	stxa	%o1, [%o5-8]%asi
	bgu,pt	%xcc, .wr_loop_small
	 stxa	%o1, [%o5]ASI_STBI_P

	ba	.asi_done
	 add	%o5, 8, %o5		! restore %o5 offset

	! Special case loop for zero fill memsets
	! For each 64 byte cache line, single STBI to first element
	! clears line
.wrzero:
	cmp	%o4, MIN_ZERO		! check if enough bytes to set
					! to pay %asi + membar cost
	blu	%xcc, .short_set
	 nop
	sub	%o4, 256, %o4

.wrzero_loop:
	! Zero 256 bytes (4 cache lines) per iteration with one BIS
	! store per line; %g3 is free here as %asi was never switched.
	mov	64, %g3
	stxa	%o1, [%o5]ASI_STBI_P
	subcc	%o4, 256, %o4
	stxa	%o1, [%o5+%g3]ASI_STBI_P
	add	%o5, 256, %o5
	sub	%g3, 192, %g3
	stxa	%o1, [%o5+%g3]ASI_STBI_P
	add	%g3, 64, %g3
	bge,pt	%xcc, .wrzero_loop
	 stxa	%o1, [%o5+%g3]ASI_STBI_P
	add	%o4, 256, %o4

	brz,pn	%o4, .bsi_done
	 nop

.wrzero_small:
	stxa	%o1, [%o5]ASI_STBI_P
	subcc	%o4, 64, %o4
	bgu,pt	%xcc, .wrzero_small
	 add	%o5, 64, %o5
	ba,a	.bsi_done

.asi_done:
	wr	%g3, 0x0, %asi		! restore saved %asi
.bsi_done:
	membar	#StoreStore		! required by use of Block Store Init

.short_set:
	cmp	%o4, 64			! check if 64 bytes to set
	blu	%xcc, 5f
	 nop
4:					! set final blocks of 64 bytes
	stx	%o1, [%o5]
	stx	%o1, [%o5+8]
	stx	%o1, [%o5+16]
	stx	%o1, [%o5+24]
	subcc	%o4, 64, %o4
	stx	%o1, [%o5+32]
	stx	%o1, [%o5+40]
	add	%o5, 64, %o5
	stx	%o1, [%o5-16]
	bgu,pt	%xcc, 4b
	 stx	%o1, [%o5-8]

5:
	! Set the remaining long words
.wrshort:
	subcc	%o3, 8, %o3		! Can we store any long words?
	blu,pn	%xcc, .wrchars
	 and	%o2, 7, %o2		! calc bytes left after long words
6:
	subcc	%o3, 8, %o3
	stx	%o1, [%o5]		! store the long words
	bgeu,pt	%xcc, 6b
	 add	%o5, 8, %o5

.wrchars:				! check for extra chars
	brnz	%o2, .wrfin
	 nop
	retl
	 nop

.wdalign:
	andcc	%o5, 3, %o3		! is sp1 aligned on a word boundary
	bz,pn	%xcc, .wrword
	 andn	%o2, 3, %o3		! create word sized count in %o3

	dec	%o2			! decrement count
	stb	%o1, [%o5]		! clear a byte
	b	.wdalign
	 inc	%o5			! next byte

.wrword:
	subcc	%o3, 4, %o3
	st	%o1, [%o5]		! 4-byte writing loop
	bnz,pt	%xcc, .wrword
	 add	%o5, 4, %o5

	and	%o2, 3, %o2		! leftover count, if any

.wrchar:
	! Set the remaining bytes, if any
	brz	%o2, .exit
	 nop
.wrfin:
	deccc	%o2
	stb	%o1, [%o5]
	bgu,pt	%xcc, .wrfin
	 inc	%o5
.exit:
	retl				! %o0 was preserved
	 nop

	.size		M7memset,.-M7memset