/*
 * arch/xtensa/mm/misc.S
 *
 * Miscellaneous assembly functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 */


#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>


/*
 * clear_page and clear_user_page are the same for non-cache-aliased configs.
 *
 * clear_page (unsigned long page)
 *                    a2
 */

ENTRY(clear_page)

	entry	a1, 16

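	/* Zero the page: the __loopi loop below (a hardware zero-overhead
	 * loop where the core supports one) stores eight zero words, i.e.
	 * 32 bytes, per iteration until PAGE_SIZE bytes are written.
	 */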
	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	retw

ENDPROC(clear_page)

/*
 * copy_page and copy_user_page are the same for non-cache-aliased configs.
 *
 * copy_page (void *to, void *from)
 *               a2          a3
 */

ENTRY(copy_page)

	entry	a1, 16

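	/* Copy the page 32 bytes per iteration; each pair of loads into
	 * a8/a9 is followed by the matching pair of stores.
	 */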
	__loopi a2, a4, PAGE_SIZE, 32

	l32i    a8, a3, 0
	l32i    a9, a3, 4
	s32i    a8, a2, 0
	s32i    a9, a2, 4

	l32i    a8, a3, 8
	l32i    a9, a3, 12
	s32i    a8, a2, 8
	s32i    a9, a2, 12

	l32i    a8, a3, 16
	l32i    a9, a3, 20
	s32i    a8, a2, 16
	s32i    a9, a2, 20

	l32i    a8, a3, 24
	l32i    a9, a3, 28
	s32i    a8, a2, 24
	s32i    a9, a2, 28

	addi    a2, a2, 32
	addi    a3, a3, 32

	__endl  a2, a4

	retw

ENDPROC(copy_page)

#ifdef CONFIG_MMU
/*
 * If we have to deal with cache aliasing, we use temporary memory mappings
 * to ensure that the source and destination pages have the same color as
 * the virtual address. We use ways 0 and 1 for temporary mappings in such
 * cases.
 *
 * The temporary DTLB entries shouldn't be flushed by interrupts, but are
 * flushed by preemptive task switches. Special code in the
 * fast_second_level_miss handler re-establishes the temporary mapping.
 * It requires that the PPNs for the destination and source addresses are
 * in a6 and a7, respectively.
 */

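/*
 * Background on "color": with a virtually indexed data cache whose way size
 * is larger than PAGE_SIZE, the cache index uses virtual address bits above
 * the page offset.  Two mappings of the same physical page whose virtual
 * addresses differ in those bits (their "color") land in different cache
 * lines, which is the aliasing the temporary mappings below avoid.
 */
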
/* TLB miss exceptions are treated specially in the following region */

ENTRY(__tlbtemp_mapping_start)

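/*
 * __tlbtemp_mapping_start/_itlb/_end are only markers: the second-level
 * TLB miss handler compares the faulting PC against this range to decide
 * whether one of the temporary mappings below has to be re-installed, and
 * whether it belongs to the DTLB or the ITLB.
 */
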
#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * clear_page_alias(void *addr, unsigned long paddr)
 *                     a2              a3
 */

ENTRY(clear_page_alias)

	entry	a1, 32

	/* Skip the temporary DTLB entry if this is not an aliased low page. */

	movi	a5, PAGE_OFFSET
	movi	a6, 0
	beqz	a3, 1f

	/* Set up a temporary DTLB entry for the address. */

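	/* wdtlb takes the PTE-style value in a6 (physical address plus
	 * attributes) and the entry specifier in a2: the virtual address,
	 * whose low bits select the way (way 0 here, since a2 is page
	 * aligned); dsync makes the new entry effective before the stores.
	 */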
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

1:	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	bnez	a6, 1f
	retw

	/* We need to invalidate the temporary DTLB entry, if any. */

1:	idtlb	a4
	dsync

	retw

ENDPROC(clear_page_alias)

/*
 * copy_page_alias(void *to, void *from,
 *                    a2        a3
 *                 unsigned long to_paddr, unsigned long from_paddr)
 *                               a4                       a5
 */

ENTRY(copy_page_alias)

	entry	a1, 32

	/* Skip the temporary DTLB entry for the destination if not aliased. */

	movi	a6, 0
	movi	a7, 0
	beqz	a4, 1f

	/* Set up a temporary DTLB entry for the destination. */

	addi	a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)
	wdtlb	a6, a2
	dsync

	/* Skip the temporary DTLB entry for the source if not aliased. */

1:	beqz	a5, 1f

	/* Set up a temporary DTLB entry for the source. */

	addi	a7, a5, PAGE_KERNEL
	addi	a8, a3, 1				# way1

	wdtlb	a7, a8
	dsync

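	/* From here on this is the same 32-bytes-per-iteration copy loop
	 * as copy_page above.
	 */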
1:	__loopi a2, a4, PAGE_SIZE, 32

	l32i    a8, a3, 0
	l32i    a9, a3, 4
	s32i    a8, a2, 0
	s32i    a9, a2, 4

	l32i    a8, a3, 8
	l32i    a9, a3, 12
	s32i    a8, a2, 8
	s32i    a9, a2, 12

	l32i    a8, a3, 16
	l32i    a9, a3, 20
	s32i    a8, a2, 16
	s32i    a9, a2, 20

	l32i    a8, a3, 24
	l32i    a9, a3, 28
	s32i    a8, a2, 24
	s32i    a9, a2, 28

	addi    a2, a2, 32
	addi    a3, a3, 32

	__endl  a2, a4

	/* We need to invalidate any temporary mapping! */

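	/* The copy loop advanced a2/a3 one page past 'to'/'from'; back up by
	 * PAGE_SIZE to get the mapped addresses again (the source entry also
	 * carries the +1 that selects way 1).
	 */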
	bnez	a6, 1f
	bnez	a7, 2f
	retw

1:	addi	a2, a2, -PAGE_SIZE
	idtlb	a2
	dsync
	bnez	a7, 2f
	retw

2:	addi	a3, a3, -PAGE_SIZE+1
	idtlb	a3
	dsync

	retw

ENDPROC(copy_page_alias)

#endif

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * void __flush_invalidate_dcache_page_alias (addr, phys)
 *                                             a2    a3
 */

ENTRY(__flush_invalidate_dcache_page_alias)

	entry	sp, 16

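	/* Per the a6/a7 convention described above: the second-level miss
	 * handler re-creates the temporary mapping from these registers, and
	 * a7 = 0 indicates that no second (source) entry is in use here.
	 */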
	movi	a7, 0			# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	___flush_invalidate_dcache_page a2 a3

	idtlb	a4
	dsync

	retw

ENDPROC(__flush_invalidate_dcache_page_alias)

/*
 * void __invalidate_dcache_page_alias (addr, phys)
 *                                       a2    a3
 */

ENTRY(__invalidate_dcache_page_alias)

	entry	sp, 16

	movi	a7, 0			# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	___invalidate_dcache_page a2 a3

	idtlb	a4
	dsync

	retw

ENDPROC(__invalidate_dcache_page_alias)
#endif

ENTRY(__tlbtemp_mapping_itlb)

#if (ICACHE_WAY_SIZE > PAGE_SIZE)

ENTRY(__invalidate_icache_page_alias)

	entry	sp, 16

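	/* Same temporary-mapping trick as the DTLB routines above, but
	 * through the ITLB with an executable mapping (witlb/iitlb and
	 * isync instead of wdtlb/idtlb and dsync).
	 */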
	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
	mov	a4, a2
	witlb	a6, a2
	isync

	___invalidate_icache_page a2 a3

	iitlb	a4
	isync
	retw

ENDPROC(__invalidate_icache_page_alias)

#endif

/* End of the region treated specially by TLB miss exceptions */

ENTRY(__tlbtemp_mapping_end)

#endif /* CONFIG_MMU */

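/*
 * The remaining routines are thin wrappers around the macros from
 * <asm/cacheasm.h>.  Register arguments follow the C prototypes given in
 * the comments (a2 = start, a3 = size where present); any additional
 * register operands passed to the macros are merely scratch registers.
 */
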
/*
 * void __invalidate_icache_page(ulong start)
 */

ENTRY(__invalidate_icache_page)

	entry	sp, 16

	___invalidate_icache_page a2 a3
	isync

	retw

ENDPROC(__invalidate_icache_page)

/*
 * void __invalidate_dcache_page(ulong start)
 */

ENTRY(__invalidate_dcache_page)

	entry	sp, 16

	___invalidate_dcache_page a2 a3
	dsync

	retw

ENDPROC(__invalidate_dcache_page)

/*
 * void __flush_invalidate_dcache_page(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page)

	entry	sp, 16

	___flush_invalidate_dcache_page a2 a3

	dsync
	retw

ENDPROC(__flush_invalidate_dcache_page)

/*
 * void __flush_dcache_page(ulong start)
 */

ENTRY(__flush_dcache_page)

	entry	sp, 16

	___flush_dcache_page a2 a3

	dsync
	retw

ENDPROC(__flush_dcache_page)

/*
 * void __invalidate_icache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_icache_range)

	entry	sp, 16

	___invalidate_icache_range a2 a3 a4
	isync

	retw

ENDPROC(__invalidate_icache_range)

/*
 * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_dcache_range)

	entry	sp, 16

	___flush_invalidate_dcache_range a2 a3 a4
	dsync

	retw

ENDPROC(__flush_invalidate_dcache_range)

/*
 * void __flush_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_dcache_range)

	entry	sp, 16

	___flush_dcache_range a2 a3 a4
	dsync

	retw

ENDPROC(__flush_dcache_range)

/*
 * void __invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_dcache_range)

	entry	sp, 16

	___invalidate_dcache_range a2 a3 a4

	retw

ENDPROC(__invalidate_dcache_range)

/*
 * void __invalidate_icache_all(void)
 */

ENTRY(__invalidate_icache_all)

	entry	sp, 16

	___invalidate_icache_all a2 a3
	isync

	retw

ENDPROC(__invalidate_icache_all)

/*
 * void __flush_invalidate_dcache_all(void)
 */

ENTRY(__flush_invalidate_dcache_all)

	entry	sp, 16

	___flush_invalidate_dcache_all a2 a3
	dsync

	retw

ENDPROC(__flush_invalidate_dcache_all)

/*
 * void __invalidate_dcache_all(void)
 */

ENTRY(__invalidate_dcache_all)

	entry	sp, 16

	___invalidate_dcache_all a2 a3
	dsync

	retw

ENDPROC(__invalidate_dcache_all)
485