xref: /openbmc/linux/arch/ia64/lib/flush.S (revision ebd09753)
1/*
2 * Cache flushing routines.
3 *
4 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
5 *	David Mosberger-Tang <davidm@hpl.hp.com>
6 *
7 * 05/28/05 Zoltan Menyhart	Dynamic stride size
8 */
9
10#include <asm/asmmacro.h>
11#include <asm/export.h>
12
13
14	/*
15	 * flush_icache_range(start,end)
16	 *
17	 *	Make i-cache(s) coherent with d-caches.
18	 *
19	 *	Must deal with range from start to end-1 but nothing else (need to
20	 *	be careful not to touch addresses that may be unmapped).
21	 *
22	 *	Note: "in0" and "in1" are preserved for debugging purposes.
23	 */
24	.section .kprobes.text,"ax"
25GLOBAL_ENTRY(flush_icache_range)
26
27	.prologue
28	alloc	r2=ar.pfs,2,0,0,0	// 2 input regs: in0 = start, in1 = end
29	movl	r3=ia64_i_cache_stride_shift
30 	mov	r21=1
31	;;
32	ld8	r20=[r3]		// r20: stride shift
33	sub	r22=in1,r0,1		// last byte address (= end - 1; r0 is the zero register)
34	;;
35	shr.u	r23=in0,r20		// start / (stride size)
36	shr.u	r22=r22,r20		// (last byte address) / (stride size)
37	shl	r21=r21,r20		// r21: stride size of the i-cache(s)
38	;;
39	sub	r8=r22,r23		// number of strides - 1
40	shl	r24=r23,r20		// r24: addresses for "fc.i" =
41					//	"start" rounded down to stride boundary
42	.save	ar.lc,r3
43	mov	r3=ar.lc		// save ar.lc (clobbered by the counted loop below)
44	;;
45
46	.body
47	mov	ar.lc=r8		// loop trip count = number of strides - 1
48	;;
49	/*
50	 * 32 byte aligned loop, even number of (actually 2) bundles
51	 */
52.Loop:	fc.i	r24			// issuable on M0 only
53	add	r24=r21,r24		// we flush "stride size" bytes per iteration
54	nop.i	0
55	br.cloop.sptk.few .Loop
56	;;
57	sync.i				// wait for the fc.i flushes to complete
58	;;
59	srlz.i				// serialize so instruction fetch sees the new contents
60	;;
61	mov	ar.lc=r3		// restore ar.lc
62	br.ret.sptk.many rp
63END(flush_icache_range)
64EXPORT_SYMBOL_GPL(flush_icache_range)
65
66	/*
67	 * clflush_cache_range(start,size)
68	 *
69	 *	Flush cache lines from start to start+size-1.
70	 *
71	 *	Must deal with range from start to start+size-1 but nothing else
72	 *	(need to be careful not to touch addresses that may be
73	 *	unmapped).
74	 *
75	 *	Note: "in0" and "in1" are preserved for debugging purposes.
76	 */
77	.section .kprobes.text,"ax"
78GLOBAL_ENTRY(clflush_cache_range)
79
80	.prologue
81	alloc	r2=ar.pfs,2,0,0,0	// 2 input regs: in0 = start, in1 = size
82	movl	r3=ia64_cache_stride_shift
83	mov	r21=1
84	add     r22=in1,in0		// r22 = start + size = one past the end
85	;;
86	ld8	r20=[r3]		// r20: stride shift
87	sub	r22=r22,r0,1		// last byte address (r0 is the zero register)
88	;;
89	shr.u	r23=in0,r20		// start / (stride size)
90	shr.u	r22=r22,r20		// (last byte address) / (stride size)
91	shl	r21=r21,r20		// r21: stride size of the cache(s)
92	;;
93	sub	r8=r22,r23		// number of strides - 1
94	shl	r24=r23,r20		// r24: addresses for "fc" =
95					//	"start" rounded down to stride
96					//	boundary
97	.save	ar.lc,r3
98	mov	r3=ar.lc		// save ar.lc (clobbered by the counted loop below)
99	;;
100
101	.body
102	mov	ar.lc=r8		// loop trip count = number of strides - 1
103	;;
104	/*
105	 * 32 byte aligned loop, even number of (actually 2) bundles
106	 */
107.Loop_fc:
108	fc	r24		// issuable on M0 only
109	add	r24=r21,r24	// we flush "stride size" bytes per iteration
110	nop.i	0
111	br.cloop.sptk.few .Loop_fc
112	;;
113	sync.i				// wait for the fc flushes to complete
114	;;
115	srlz.i				// serialize the instruction stream
116	;;
117	mov	ar.lc=r3		// restore ar.lc
118	br.ret.sptk.many rp
119END(clflush_cache_range)
120
120