xref: /openbmc/linux/arch/sparc/lib/M7memset.S (revision b3a04ed5)
1b3a04ed5SBabu Moger/*
2b3a04ed5SBabu Moger * M7memset.S: SPARC M7 optimized memset.
3b3a04ed5SBabu Moger *
4b3a04ed5SBabu Moger * Copyright (c) 2016, Oracle and/or its affiliates.  All rights reserved.
5b3a04ed5SBabu Moger */
6b3a04ed5SBabu Moger
7b3a04ed5SBabu Moger/*
8b3a04ed5SBabu Moger * M7memset.S: M7 optimized memset.
9b3a04ed5SBabu Moger *
10b3a04ed5SBabu Moger * char *memset(sp, c, n)
11b3a04ed5SBabu Moger *
12b3a04ed5SBabu Moger * Set an array of n chars starting at sp to the character c.
13b3a04ed5SBabu Moger * Return sp.
14b3a04ed5SBabu Moger *
15b3a04ed5SBabu Moger * Fast assembler language version of the following C-program for memset
16b3a04ed5SBabu Moger * which represents the `standard' for the C-library.
17b3a04ed5SBabu Moger *
18b3a04ed5SBabu Moger *	void *
19b3a04ed5SBabu Moger *	memset(void *sp1, int c, size_t n)
20b3a04ed5SBabu Moger *	{
21b3a04ed5SBabu Moger *	    if (n != 0) {
22b3a04ed5SBabu Moger *		char *sp = sp1;
23b3a04ed5SBabu Moger *		do {
24b3a04ed5SBabu Moger *		    *sp++ = (char)c;
25b3a04ed5SBabu Moger *		} while (--n != 0);
26b3a04ed5SBabu Moger *	    }
27b3a04ed5SBabu Moger *	    return (sp1);
28b3a04ed5SBabu Moger *	}
29b3a04ed5SBabu Moger *
30b3a04ed5SBabu Moger * The algorithm is as follows :
31b3a04ed5SBabu Moger *
 32b3a04ed5SBabu Moger *	For small counts of 6 or fewer bytes, the bytes are stored individually.
33b3a04ed5SBabu Moger *
34b3a04ed5SBabu Moger *	For less than 32 bytes stores, align the address on 4 byte boundary.
35b3a04ed5SBabu Moger *	Then store as many 4-byte chunks, followed by trailing bytes.
36b3a04ed5SBabu Moger *
37b3a04ed5SBabu Moger *	For sizes greater than 32 bytes, align the address on 8 byte boundary.
38b3a04ed5SBabu Moger *	if (count >= 64) {
39b3a04ed5SBabu Moger *      	store 8-bytes chunks to align the address on 64 byte boundary
40b3a04ed5SBabu Moger *		if (value to be set is zero && count >= MIN_ZERO) {
41b3a04ed5SBabu Moger *              	Using BIS stores, set the first long word of each
42b3a04ed5SBabu Moger *			64-byte cache line to zero which will also clear the
43b3a04ed5SBabu Moger *			other seven long words of the cache line.
44b3a04ed5SBabu Moger *       	}
45b3a04ed5SBabu Moger *       	else if (count >= MIN_LOOP) {
46b3a04ed5SBabu Moger *       		Using BIS stores, set the first long word of each of
47b3a04ed5SBabu Moger *              	ST_CHUNK cache lines (64 bytes each) before the main
48b3a04ed5SBabu Moger *			loop is entered.
49b3a04ed5SBabu Moger *              	In the main loop, continue pre-setting the first long
50b3a04ed5SBabu Moger *              	word of each cache line ST_CHUNK lines in advance while
51b3a04ed5SBabu Moger *              	setting the other seven long words (56 bytes) of each
52b3a04ed5SBabu Moger * 			cache line until fewer than ST_CHUNK*64 bytes remain.
53b3a04ed5SBabu Moger *			Then set the remaining seven long words of each cache
54b3a04ed5SBabu Moger * 			line that has already had its first long word set.
55b3a04ed5SBabu Moger *       	}
56b3a04ed5SBabu Moger *       	store remaining data in 64-byte chunks until less than
57b3a04ed5SBabu Moger *       	64 bytes remain.
58b3a04ed5SBabu Moger *       }
59b3a04ed5SBabu Moger *       Store as many 8-byte chunks, followed by trailing bytes.
60b3a04ed5SBabu Moger *
61b3a04ed5SBabu Moger * BIS = Block Init Store
62b3a04ed5SBabu Moger *   Doing the advance store of the first element of the cache line
63b3a04ed5SBabu Moger *   initiates the displacement of a cache line while only using a single
64b3a04ed5SBabu Moger *   instruction in the pipeline. That avoids various pipeline delays,
65b3a04ed5SBabu Moger *   such as filling the miss buffer. The performance effect is
66b3a04ed5SBabu Moger *   similar to prefetching for normal stores.
67b3a04ed5SBabu Moger *   The special case for zero fills runs faster and uses fewer instruction
68b3a04ed5SBabu Moger *   cycles than the normal memset loop.
69b3a04ed5SBabu Moger *
 70b3a04ed5SBabu Moger * We only use BIS for memset of greater than MIN_LOOP bytes because a sequence
 71b3a04ed5SBabu Moger * of BIS stores must be followed by a membar #StoreStore. The benefit of
72b3a04ed5SBabu Moger * the BIS store must be balanced against the cost of the membar operation.
73b3a04ed5SBabu Moger */
74b3a04ed5SBabu Moger
75b3a04ed5SBabu Moger/*
76b3a04ed5SBabu Moger * ASI_STBI_P marks the cache line as "least recently used"
77b3a04ed5SBabu Moger * which means if many threads are active, it has a high chance
78b3a04ed5SBabu Moger * of being pushed out of the cache between the first initializing
79b3a04ed5SBabu Moger * store and the final stores.
80b3a04ed5SBabu Moger * Thus, we use ASI_STBIMRU_P which marks the cache line as
81b3a04ed5SBabu Moger * "most recently used" for all but the last store to the cache line.
82b3a04ed5SBabu Moger */
83b3a04ed5SBabu Moger
84b3a04ed5SBabu Moger#include <asm/asi.h>
85b3a04ed5SBabu Moger#include <asm/page.h>
86b3a04ed5SBabu Moger
87b3a04ed5SBabu Moger#define ASI_STBI_P      ASI_BLK_INIT_QUAD_LDD_P
88b3a04ed5SBabu Moger#define ASI_STBIMRU_P   ASI_ST_BLKINIT_MRU_P
89b3a04ed5SBabu Moger
90b3a04ed5SBabu Moger
91b3a04ed5SBabu Moger#define ST_CHUNK        24   /* multiple of 4 due to loop unrolling */
92b3a04ed5SBabu Moger#define MIN_LOOP        16320
93b3a04ed5SBabu Moger#define MIN_ZERO        512
94b3a04ed5SBabu Moger
95b3a04ed5SBabu Moger	.section	".text"
96b3a04ed5SBabu Moger	.align		32
97b3a04ed5SBabu Moger
98b3a04ed5SBabu Moger/*
99b3a04ed5SBabu Moger * Define clear_page(dest) as memset(dest, 0, PAGE_SIZE)
100b3a04ed5SBabu Moger * (can create a more optimized version later.)
101b3a04ed5SBabu Moger */
102b3a04ed5SBabu Moger	.globl		M7clear_page
103b3a04ed5SBabu Moger	.globl		M7clear_user_page
104b3a04ed5SBabu MogerM7clear_page:		/* clear_page(dest) */
105b3a04ed5SBabu MogerM7clear_user_page:
	! %o0 already holds dest; load the page length into %o1 so the
	! register layout matches bzero(dest, size) and fall through.
106b3a04ed5SBabu Moger	set	PAGE_SIZE, %o1
107b3a04ed5SBabu Moger	/* fall through into bzero code */
108b3a04ed5SBabu Moger
109b3a04ed5SBabu Moger	.size		M7clear_page,.-M7clear_page
110b3a04ed5SBabu Moger	.size		M7clear_user_page,.-M7clear_user_page
111b3a04ed5SBabu Moger
112b3a04ed5SBabu Moger/*
113b3a04ed5SBabu Moger * Define bzero(dest, n) as memset(dest, 0, n)
114b3a04ed5SBabu Moger * (can create a more optimized version later.)
115b3a04ed5SBabu Moger */
116b3a04ed5SBabu Moger	.globl		M7bzero
117b3a04ed5SBabu MogerM7bzero:		/* bzero(dest, size) */
	! Map bzero(dest, size) onto memset(dest, 0, size) and fall through:
118b3a04ed5SBabu Moger	mov	%o1, %o2		! size becomes memset's count (%o2)
119b3a04ed5SBabu Moger	mov	0, %o1			! fill value is zero
120b3a04ed5SBabu Moger	/* fall through into memset code */
121b3a04ed5SBabu Moger
122b3a04ed5SBabu Moger	.size		M7bzero,.-M7bzero
123b3a04ed5SBabu Moger
124b3a04ed5SBabu Moger	.global		M7memset
125b3a04ed5SBabu Moger	.type		M7memset, #function
126b3a04ed5SBabu Moger	.register	%g3, #scratch
127b3a04ed5SBabu MogerM7memset:
	! Entry: %o0 = dst (sp1), %o1 = fill char, %o2 = count.
	! %o0 is never clobbered so it can be returned directly at .exit;
	! %o5 is the working destination pointer throughout.
128b3a04ed5SBabu Moger	mov     %o0, %o5                ! copy sp1 before using it
129b3a04ed5SBabu Moger	cmp     %o2, 7                  ! if small counts, just write bytes
130b3a04ed5SBabu Moger	bleu,pn %xcc, .wrchar
131b3a04ed5SBabu Moger	 and     %o1, 0xff, %o1          ! o1 is (char)c
132b3a04ed5SBabu Moger
	! Replicate the fill byte across %o1: 2 copies, then 4, then 8.
133b3a04ed5SBabu Moger	sll     %o1, 8, %o3
134b3a04ed5SBabu Moger	or      %o1, %o3, %o1           ! now o1 has 2 bytes of c
135b3a04ed5SBabu Moger	sll     %o1, 16, %o3
136b3a04ed5SBabu Moger	cmp     %o2, 32
137b3a04ed5SBabu Moger	blu,pn  %xcc, .wdalign
138b3a04ed5SBabu Moger	 or      %o1, %o3, %o1           ! now o1 has 4 bytes of c
139b3a04ed5SBabu Moger
140b3a04ed5SBabu Moger	sllx    %o1, 32, %o3
141b3a04ed5SBabu Moger	or      %o1, %o3, %o1           ! now o1 has 8 bytes of c
142b3a04ed5SBabu Moger
143b3a04ed5SBabu Moger.dbalign:
144b3a04ed5SBabu Moger	andcc   %o5, 7, %o3             ! is sp1 aligned on a 8 byte bound?
145b3a04ed5SBabu Moger	bz,pt   %xcc, .blkalign         ! already long word aligned
146b3a04ed5SBabu Moger	 sub     %o3, 8, %o3             ! -(bytes till long word aligned)
147b3a04ed5SBabu Moger
148b3a04ed5SBabu Moger	add     %o2, %o3, %o2           ! update o2 with new count
149b3a04ed5SBabu Moger	! Set -(%o3) bytes till sp1 long word aligned
150b3a04ed5SBabu Moger1:	stb     %o1, [%o5]              ! there is at least 1 byte to set
151b3a04ed5SBabu Moger	inccc   %o3                     ! byte clearing loop
152b3a04ed5SBabu Moger	bl,pt   %xcc, 1b
153b3a04ed5SBabu Moger	 inc     %o5
154b3a04ed5SBabu Moger
155b3a04ed5SBabu Moger	! Now sp1 is long word aligned (sp1 is found in %o5)
156b3a04ed5SBabu Moger.blkalign:
157b3a04ed5SBabu Moger	cmp     %o2, 64                 ! check if there are 64 bytes to set
158b3a04ed5SBabu Moger	blu,pn  %xcc, .wrshort
159b3a04ed5SBabu Moger	 mov     %o2, %o3                ! delay slot: %o3 = remaining count
160b3a04ed5SBabu Moger
161b3a04ed5SBabu Moger	andcc   %o5, 63, %o3            ! is sp1 block aligned?
162b3a04ed5SBabu Moger	bz,pt   %xcc, .blkwr            ! now block aligned
163b3a04ed5SBabu Moger	 sub     %o3, 64, %o3            ! o3 is -(bytes till block aligned)
164b3a04ed5SBabu Moger	add     %o2, %o3, %o2           ! o2 is the remainder
165b3a04ed5SBabu Moger
166b3a04ed5SBabu Moger	! Store -(%o3) bytes till dst is block (64 byte) aligned.
167b3a04ed5SBabu Moger	! Use long word stores.
168b3a04ed5SBabu Moger	! Recall that dst is already long word aligned
169b3a04ed5SBabu Moger1:
170b3a04ed5SBabu Moger	addcc   %o3, 8, %o3
171b3a04ed5SBabu Moger	stx     %o1, [%o5]
172b3a04ed5SBabu Moger	bl,pt   %xcc, 1b
173b3a04ed5SBabu Moger	 add     %o5, 8, %o5
174b3a04ed5SBabu Moger
175b3a04ed5SBabu Moger	! Now sp1 is block aligned
176b3a04ed5SBabu Moger.blkwr:
177b3a04ed5SBabu Moger	andn    %o2, 63, %o4            ! calculate size of blocks in bytes
178b3a04ed5SBabu Moger	brz,pn  %o1, .wrzero            ! special case if c == 0
179b3a04ed5SBabu Moger	 and     %o2, 63, %o3            ! %o3 = bytes left after blk stores.
180b3a04ed5SBabu Moger
181b3a04ed5SBabu Moger	set     MIN_LOOP, %g1
182b3a04ed5SBabu Moger	cmp     %o4, %g1                ! check there are enough bytes to set
183b3a04ed5SBabu Moger	blu,pn  %xcc, .short_set        ! to justify cost of membar
184b3a04ed5SBabu Moger	                                ! must be > pre-cleared lines
185b3a04ed5SBabu Moger	 nop
186b3a04ed5SBabu Moger
187b3a04ed5SBabu Moger	! initial cache-clearing stores
188b3a04ed5SBabu Moger	! get store pipeline moving
189b3a04ed5SBabu Moger	rd	%asi, %g3		! save %asi to be restored later
190b3a04ed5SBabu Moger	wr     %g0, ASI_STBIMRU_P, %asi
191b3a04ed5SBabu Moger
192b3a04ed5SBabu Moger	! Primary memset loop for large memsets
	! %o5 is biased down by 8 below, so all the [%o5 + 8 + k] forms
	! address the intended bytes; the bias is undone (add %o5, 8)
	! on the way out to .asi_done.
193b3a04ed5SBabu Moger.wr_loop:
194b3a04ed5SBabu Moger	sub     %o5, 8, %o5		! adjust %o5 for ASI store alignment
195b3a04ed5SBabu Moger	mov     ST_CHUNK, %g1
	! Pre-set the first long word of 4 cache lines (256 bytes) per
	! iteration — ST_CHUNK lines in total — using the MRU-marking ASI
	! so the lines are not evicted before they are fully written.
196b3a04ed5SBabu Moger.wr_loop_start:
197b3a04ed5SBabu Moger	stxa    %o1, [%o5+8]%asi
198b3a04ed5SBabu Moger	subcc   %g1, 4, %g1
199b3a04ed5SBabu Moger	stxa    %o1, [%o5+8+64]%asi
200b3a04ed5SBabu Moger	add     %o5, 256, %o5
201b3a04ed5SBabu Moger	stxa    %o1, [%o5+8-128]%asi
202b3a04ed5SBabu Moger	bgu     %xcc, .wr_loop_start
203b3a04ed5SBabu Moger	 stxa    %o1, [%o5+8-64]%asi
204b3a04ed5SBabu Moger
205b3a04ed5SBabu Moger	sub     %o5, ST_CHUNK*64, %o5	! reset %o5
206b3a04ed5SBabu Moger	mov     ST_CHUNK, %g1
207b3a04ed5SBabu Moger
	! Fill in the remaining seven long words of each pre-set line.
	! The final store of each line uses ASI_STBI_P (LRU marking) so the
	! completed line becomes an eviction candidate (see header comment).
208b3a04ed5SBabu Moger.wr_loop_rest:
209b3a04ed5SBabu Moger	stxa    %o1, [%o5+8+8]%asi
210b3a04ed5SBabu Moger	sub     %o4, 64, %o4
211b3a04ed5SBabu Moger	stxa    %o1, [%o5+16+8]%asi
212b3a04ed5SBabu Moger	subcc   %g1, 1, %g1
213b3a04ed5SBabu Moger	stxa    %o1, [%o5+24+8]%asi
214b3a04ed5SBabu Moger	stxa    %o1, [%o5+32+8]%asi
215b3a04ed5SBabu Moger	stxa    %o1, [%o5+40+8]%asi
216b3a04ed5SBabu Moger	add     %o5, 64, %o5
217b3a04ed5SBabu Moger	stxa    %o1, [%o5-8]%asi
218b3a04ed5SBabu Moger	bgu     %xcc, .wr_loop_rest
219b3a04ed5SBabu Moger	 stxa    %o1, [%o5]ASI_STBI_P
220b3a04ed5SBabu Moger
221b3a04ed5SBabu Moger	! If more than ST_CHUNK*64 bytes remain to set, continue
222b3a04ed5SBabu Moger	! setting the first long word of each cache line in advance
223b3a04ed5SBabu Moger	! to keep the store pipeline moving.
224b3a04ed5SBabu Moger
225b3a04ed5SBabu Moger	cmp     %o4, ST_CHUNK*64
226b3a04ed5SBabu Moger	bge,pt  %xcc, .wr_loop_start
227b3a04ed5SBabu Moger	 mov     ST_CHUNK, %g1
228b3a04ed5SBabu Moger
229b3a04ed5SBabu Moger	brz,a,pn %o4, .asi_done
230b3a04ed5SBabu Moger	 add     %o5, 8, %o5             ! restore %o5 offset
231b3a04ed5SBabu Moger
	! Fewer than ST_CHUNK*64 bytes left: set whole 64-byte lines without
	! the pre-setting pass; last store per line still uses ASI_STBI_P.
232b3a04ed5SBabu Moger.wr_loop_small:
233b3a04ed5SBabu Moger	stxa    %o1, [%o5+8]%asi
234b3a04ed5SBabu Moger	stxa    %o1, [%o5+8+8]%asi
235b3a04ed5SBabu Moger	stxa    %o1, [%o5+16+8]%asi
236b3a04ed5SBabu Moger	stxa    %o1, [%o5+24+8]%asi
237b3a04ed5SBabu Moger	stxa    %o1, [%o5+32+8]%asi
238b3a04ed5SBabu Moger	subcc   %o4, 64, %o4
239b3a04ed5SBabu Moger	stxa    %o1, [%o5+40+8]%asi
240b3a04ed5SBabu Moger	add     %o5, 64, %o5
241b3a04ed5SBabu Moger	stxa    %o1, [%o5-8]%asi
242b3a04ed5SBabu Moger	bgu,pt  %xcc, .wr_loop_small
243b3a04ed5SBabu Moger	 stxa    %o1, [%o5]ASI_STBI_P
244b3a04ed5SBabu Moger
245b3a04ed5SBabu Moger	ba      .asi_done
246b3a04ed5SBabu Moger	 add     %o5, 8, %o5             ! restore %o5 offset
247b3a04ed5SBabu Moger
248b3a04ed5SBabu Moger	! Special case loop for zero fill memsets
249b3a04ed5SBabu Moger	! For each 64 byte cache line, single STBI to first element
250b3a04ed5SBabu Moger	! clears line
	! NOTE: this path never switched %asi (it branched from .blkwr before
	! the rd/wr of %asi), so %g3 holds no saved state and is free to use
	! as an offset register; it exits via .bsi_done, skipping the restore.
251b3a04ed5SBabu Moger.wrzero:
252b3a04ed5SBabu Moger	cmp     %o4, MIN_ZERO           ! check if enough bytes to set
253b3a04ed5SBabu Moger					! to pay %asi + membar cost
254b3a04ed5SBabu Moger	blu     %xcc, .short_set
255b3a04ed5SBabu Moger	 nop
256b3a04ed5SBabu Moger	sub     %o4, 256, %o4           ! pre-bias count for the bge test below
257b3a04ed5SBabu Moger
	! Clear 4 cache lines (256 bytes) per iteration with one block-init
	! store each, at line offsets 0, 64, 128 and 192 via %g3.
258b3a04ed5SBabu Moger.wrzero_loop:
259b3a04ed5SBabu Moger	mov     64, %g3
260b3a04ed5SBabu Moger	stxa    %o1, [%o5]ASI_STBI_P
261b3a04ed5SBabu Moger	subcc   %o4, 256, %o4
262b3a04ed5SBabu Moger	stxa    %o1, [%o5+%g3]ASI_STBI_P
263b3a04ed5SBabu Moger	add     %o5, 256, %o5
264b3a04ed5SBabu Moger	sub     %g3, 192, %g3
265b3a04ed5SBabu Moger	stxa    %o1, [%o5+%g3]ASI_STBI_P
266b3a04ed5SBabu Moger	add %g3, 64, %g3
267b3a04ed5SBabu Moger	bge,pt  %xcc, .wrzero_loop
268b3a04ed5SBabu Moger	 stxa    %o1, [%o5+%g3]ASI_STBI_P
269b3a04ed5SBabu Moger	add     %o4, 256, %o4           ! undo the pre-bias
270b3a04ed5SBabu Moger
271b3a04ed5SBabu Moger	brz,pn  %o4, .bsi_done
272b3a04ed5SBabu Moger	 nop
273b3a04ed5SBabu Moger
	! Clear the remaining (1-3) whole 64-byte lines one at a time.
274b3a04ed5SBabu Moger.wrzero_small:
275b3a04ed5SBabu Moger	stxa    %o1, [%o5]ASI_STBI_P
276b3a04ed5SBabu Moger	subcc   %o4, 64, %o4
277b3a04ed5SBabu Moger	bgu,pt  %xcc, .wrzero_small
278b3a04ed5SBabu Moger	 add     %o5, 64, %o5
279b3a04ed5SBabu Moger	ba,a	.bsi_done
280b3a04ed5SBabu Moger
281b3a04ed5SBabu Moger.asi_done:
282b3a04ed5SBabu Moger	wr	%g3, 0x0, %asi		! restore saved %asi
283b3a04ed5SBabu Moger.bsi_done:
284b3a04ed5SBabu Moger	membar  #StoreStore             ! required by use of Block Store Init
285b3a04ed5SBabu Moger
	! Tail: %o4 = remaining multiple-of-64 byte count (may be 0),
	! %o3 = remaining count below 64 bytes; plain stx/stb stores only.
286b3a04ed5SBabu Moger.short_set:
287b3a04ed5SBabu Moger	cmp     %o4, 64                 ! check if 64 bytes to set
288b3a04ed5SBabu Moger	blu     %xcc, 5f
289b3a04ed5SBabu Moger	 nop
290b3a04ed5SBabu Moger4:                                      ! set final blocks of 64 bytes
291b3a04ed5SBabu Moger	stx     %o1, [%o5]
292b3a04ed5SBabu Moger	stx     %o1, [%o5+8]
293b3a04ed5SBabu Moger	stx     %o1, [%o5+16]
294b3a04ed5SBabu Moger	stx     %o1, [%o5+24]
295b3a04ed5SBabu Moger	subcc   %o4, 64, %o4
296b3a04ed5SBabu Moger	stx     %o1, [%o5+32]
297b3a04ed5SBabu Moger	stx     %o1, [%o5+40]
298b3a04ed5SBabu Moger	add     %o5, 64, %o5
299b3a04ed5SBabu Moger	stx     %o1, [%o5-16]
300b3a04ed5SBabu Moger	bgu,pt  %xcc, 4b
301b3a04ed5SBabu Moger	 stx     %o1, [%o5-8]
302b3a04ed5SBabu Moger
303b3a04ed5SBabu Moger5:
304b3a04ed5SBabu Moger	! Set the remaining long words
305b3a04ed5SBabu Moger.wrshort:
306b3a04ed5SBabu Moger	subcc   %o3, 8, %o3             ! Can we store any long words?
307b3a04ed5SBabu Moger	blu,pn  %xcc, .wrchars
308b3a04ed5SBabu Moger	 and     %o2, 7, %o2             ! calc bytes left after long words
309b3a04ed5SBabu Moger6:
310b3a04ed5SBabu Moger	subcc   %o3, 8, %o3
311b3a04ed5SBabu Moger	stx     %o1, [%o5]              ! store the long words
312b3a04ed5SBabu Moger	bgeu,pt %xcc, 6b
313b3a04ed5SBabu Moger	 add     %o5, 8, %o5
314b3a04ed5SBabu Moger
315b3a04ed5SBabu Moger.wrchars:                               ! check for extra chars
316b3a04ed5SBabu Moger	brnz    %o2, .wrfin
317b3a04ed5SBabu Moger	 nop
318b3a04ed5SBabu Moger	retl
319b3a04ed5SBabu Moger	 nop
320b3a04ed5SBabu Moger
	! Sub-32-byte path: align to a 4-byte boundary byte-by-byte
	! (the branch back to .wdalign re-tests alignment each iteration),
	! then store words.
321b3a04ed5SBabu Moger.wdalign:
322b3a04ed5SBabu Moger	andcc   %o5, 3, %o3             ! is sp1 aligned on a word boundary
323b3a04ed5SBabu Moger	bz,pn   %xcc, .wrword
324b3a04ed5SBabu Moger	 andn    %o2, 3, %o3             ! create word sized count in %o3
325b3a04ed5SBabu Moger
326b3a04ed5SBabu Moger	dec     %o2                     ! decrement count
327b3a04ed5SBabu Moger	stb     %o1, [%o5]              ! clear a byte
328b3a04ed5SBabu Moger	b       .wdalign
329b3a04ed5SBabu Moger	 inc     %o5                     ! next byte
330b3a04ed5SBabu Moger
331b3a04ed5SBabu Moger.wrword:
332b3a04ed5SBabu Moger	subcc   %o3, 4, %o3
333b3a04ed5SBabu Moger	st      %o1, [%o5]              ! 4-byte writing loop
334b3a04ed5SBabu Moger	bnz,pt  %xcc, .wrword
335b3a04ed5SBabu Moger	 add     %o5, 4, %o5
336b3a04ed5SBabu Moger
337b3a04ed5SBabu Moger	and     %o2, 3, %o2             ! leftover count, if any
338b3a04ed5SBabu Moger
339b3a04ed5SBabu Moger.wrchar:
340b3a04ed5SBabu Moger	! Set the remaining bytes, if any
341b3a04ed5SBabu Moger	brz     %o2, .exit
342b3a04ed5SBabu Moger	 nop
343b3a04ed5SBabu Moger.wrfin:
344b3a04ed5SBabu Moger	deccc   %o2
345b3a04ed5SBabu Moger	stb     %o1, [%o5]
346b3a04ed5SBabu Moger	bgu,pt  %xcc, .wrfin
347b3a04ed5SBabu Moger	 inc     %o5
348b3a04ed5SBabu Moger.exit:
349b3a04ed5SBabu Moger	retl                            ! %o0 was preserved
350b3a04ed5SBabu Moger	 nop
351b3a04ed5SBabu Moger
352b3a04ed5SBabu Moger	.size		M7memset,.-M7memset
353