/*
 * arch/xtensa/mm/misc.S
 *
 * Miscellaneous assembly functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 */


#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>


/*
 * clear_page and clear_user_page are the same for non-cache-aliased configs.
 *
 * clear_page (unsigned long page)
 *                    a2
 */

ENTRY(clear_page)

	entry	a1, 16

	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	retw

ENDPROC(clear_page)
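
/*
 * For reference, a rough C-level equivalent of the routine above (an
 * illustrative sketch only, not built into the kernel; it assumes the usual
 * PAGE_SIZE definition from <asm/page.h>):
 *
 *	void clear_page(unsigned long page)
 *	{
 *		unsigned long *p = (unsigned long *)page;
 *		unsigned long *end = p + PAGE_SIZE / sizeof(unsigned long);
 *
 *		while (p < end)
 *			*p++ = 0;
 *	}
 *
 * The assembly unrolls the store loop to 32 bytes per iteration using the
 * __loopi/__endla zero-overhead-loop macros.
 */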

/*
 * copy_page and copy_user_page are the same for non-cache-aliased configs.
 *
 * copy_page (void *to, void *from)
 *               a2          a3
 */

ENTRY(copy_page)

	entry	a1, 16

	__loopi a2, a4, PAGE_SIZE, 32

	l32i    a8, a3, 0
	l32i    a9, a3, 4
	s32i    a8, a2, 0
	s32i    a9, a2, 4

	l32i    a8, a3, 8
	l32i    a9, a3, 12
	s32i    a8, a2, 8
	s32i    a9, a2, 12

	l32i    a8, a3, 16
	l32i    a9, a3, 20
	s32i    a8, a2, 16
	s32i    a9, a2, 20

	l32i    a8, a3, 24
	l32i    a9, a3, 28
	s32i    a8, a2, 24
	s32i    a9, a2, 28

	addi    a2, a2, 32
	addi    a3, a3, 32

	__endl  a2, a4

	retw

ENDPROC(copy_page)
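
/*
 * Likewise, a rough C-level sketch of copy_page above (illustrative only,
 * not compiled; PAGE_SIZE comes from <asm/page.h>):
 *
 *	void copy_page(void *to, void *from)
 *	{
 *		unsigned long *d = to;
 *		const unsigned long *s = from;
 *		unsigned long i;
 *
 *		for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
 *			d[i] = s[i];
 *	}
 *
 * The assembly moves 32 bytes per loop iteration, pairing loads and stores
 * within each group.
 */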

#ifdef CONFIG_MMU
/*
 * If we have to deal with cache aliasing, we use temporary memory mappings
 * to ensure that the source and destination pages have the same color as
 * the virtual address. We use ways 0 and 1 for temporary mappings in such
 * cases.
 *
 * The temporary DTLB entries shouldn't be flushed by interrupts, but are
 * flushed by preemptive task switches. Special code in the
 * fast_second_level_miss handler re-establishes the temporary mapping.
 * It requires that the PPNs for the destination and source addresses are
 * in a6 and a7, respectively.
 */
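
/*
 * The alias/"color" computation used by the routines below can be sketched
 * in C roughly as follows (illustrative only; DCACHE_ALIAS_ORDER, PAGE_SHIFT
 * and TLBTEMP_BASE_1 are taken from the xtensa headers):
 *
 *	unsigned long color = (vaddr >> PAGE_SHIFT) &
 *			      ((1UL << DCACHE_ALIAS_ORDER) - 1);
 *	unsigned long tmp   = TLBTEMP_BASE_1 + (color << PAGE_SHIFT);
 *
 * A temporary DTLB entry is then written so that 'tmp' maps the physical
 * page, i.e. the kernel accesses the page through an address with the same
 * cache color as the user mapping. If the kernel address and the user
 * address already have the same color, the temporary mapping is skipped.
 */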

/* TLB miss exceptions are treated specially in the following region */

ENTRY(__tlbtemp_mapping_start)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * clear_user_page (void *addr, unsigned long vaddr, struct page *page)
 *                     a2              a3                 a4
 */

ENTRY(clear_user_page)

	entry	a1, 32

	/* Mark page dirty and determine alias. */

	movi	a7, (1 << PG_ARCH_1)
	l32i	a5, a4, PAGE_FLAGS
	xor	a6, a2, a3
	extui	a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
	extui	a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
	or	a5, a5, a7
	slli	a3, a3, PAGE_SHIFT
	s32i	a5, a4, PAGE_FLAGS

	/* Skip setting up a temporary DTLB if not aliased. */

	beqz	a6, 1f

	/* Invalidate kernel page. */

	mov	a10, a2
	call8	__invalidate_dcache_page

	/* Set up a temporary DTLB with the color of the VPN. */

	movi	a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
	movi	a5, TLBTEMP_BASE_1			# virt
	add	a6, a2, a4				# ppn
	add	a2, a5, a3				# add 'color'

	wdtlb	a6, a2
	dsync

1:	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	bnez	a6, 1f
	retw

	/* We need to invalidate the temporary DTLB entry, if any. */

1:	addi	a2, a2, -PAGE_SIZE
	idtlb	a2
	dsync

	retw

ENDPROC(clear_user_page)
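
/*
 * Pseudo-C outline of clear_user_page above (an illustrative sketch only;
 * dcache_color(), tlbtemp_map() and tlbtemp_unmap() are made-up names
 * standing in for the extui/wdtlb/idtlb sequences, not real kernel helpers):
 *
 *	void clear_user_page(void *kaddr, unsigned long vaddr,
 *			     struct page *page)
 *	{
 *		void *p = kaddr;
 *
 *		page->flags |= 1UL << PG_arch_1;
 *		if (dcache_color((unsigned long)kaddr) != dcache_color(vaddr)) {
 *			__invalidate_dcache_page((unsigned long)kaddr);
 *			p = tlbtemp_map(kaddr, vaddr);
 *		}
 *		memset(p, 0, PAGE_SIZE);
 *		if (p != kaddr)
 *			tlbtemp_unmap(p);
 *	}
 *
 * The temporary mapping lives at TLBTEMP_BASE_1 plus the color offset and
 * uses DTLB way 0.
 */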

/*
 * copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
 *                    a2          a3	        a4		    a5
 */

ENTRY(copy_user_page)

	entry	a1, 32

	/* Mark page dirty and determine alias for destination. */

	movi	a8, (1 << PG_ARCH_1)
	l32i	a9, a5, PAGE_FLAGS
	xor	a6, a2, a4
	xor	a7, a3, a4
	extui	a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
	extui	a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
	extui	a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
	or	a9, a9, a8
	slli	a4, a4, PAGE_SHIFT
	s32i	a9, a5, PAGE_FLAGS
	movi	a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)

	beqz	a6, 1f

	/* Invalidate the dcache copy of the destination page. */

	mov	a10, a2
	call8	__invalidate_dcache_page

	/* Set up a temporary DTLB with a matching color. */

	movi	a8, TLBTEMP_BASE_1			# base
	add	a6, a2, a5				# ppn
	add	a2, a8, a4				# add 'color'

	wdtlb	a6, a2
	dsync

	/* Skip setting up a temporary DTLB for the source if not aliased. */

1:	beqz	a7, 1f

	/* Set up a temporary DTLB with a matching color. */

	movi	a8, TLBTEMP_BASE_2			# base
	add	a7, a3, a5				# ppn
	add	a3, a8, a4
	addi	a8, a3, 1				# way1

	wdtlb	a7, a8
	dsync

1:	__loopi a2, a4, PAGE_SIZE, 32

	l32i    a8, a3, 0
	l32i    a9, a3, 4
	s32i    a8, a2, 0
	s32i    a9, a2, 4

	l32i    a8, a3, 8
	l32i    a9, a3, 12
	s32i    a8, a2, 8
	s32i    a9, a2, 12

	l32i    a8, a3, 16
	l32i    a9, a3, 20
	s32i    a8, a2, 16
	s32i    a9, a2, 20

	l32i    a8, a3, 24
	l32i    a9, a3, 28
	s32i    a8, a2, 24
	s32i    a9, a2, 28

	addi    a2, a2, 32
	addi    a3, a3, 32

	__endl  a2, a4

	/* We need to invalidate any temporary mapping! */

	bnez	a6, 1f
	bnez	a7, 2f
	retw

1:	addi	a2, a2, -PAGE_SIZE
	idtlb	a2
	dsync
	bnez	a7, 2f
	retw

2:	addi	a3, a3, -PAGE_SIZE+1
	idtlb	a3
	dsync

	retw

ENDPROC(copy_user_page)
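
/*
 * The corresponding pseudo-C outline of copy_user_page above, which may need
 * temporary mappings for both the destination (TLBTEMP_BASE_1, way 0) and
 * the source (TLBTEMP_BASE_2, way 1). Again an illustrative sketch only;
 * dcache_color(), tlbtemp_map() and tlbtemp_unmap() are made-up names:
 *
 *	void copy_user_page(void *to, void *from, unsigned long vaddr,
 *			    struct page *page)
 *	{
 *		void *dst = to, *src = from;
 *
 *		page->flags |= 1UL << PG_arch_1;
 *		if (dcache_color((unsigned long)to) != dcache_color(vaddr)) {
 *			__invalidate_dcache_page((unsigned long)to);
 *			dst = tlbtemp_map(to, vaddr);
 *		}
 *		if (dcache_color((unsigned long)from) != dcache_color(vaddr))
 *			src = tlbtemp_map(from, vaddr);
 *		memcpy(dst, src, PAGE_SIZE);
 *		if (dst != to)
 *			tlbtemp_unmap(dst);
 *		if (src != from)
 *			tlbtemp_unmap(src);
 *	}
 */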

#endif

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * void __flush_invalidate_dcache_page_alias (addr, phys)
 *                                             a2    a3
 */

ENTRY(__flush_invalidate_dcache_page_alias)

	entry	sp, 16

	movi	a7, 0			# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	___flush_invalidate_dcache_page a2 a3

	idtlb	a4
	dsync

	retw

ENDPROC(__flush_invalidate_dcache_page_alias)
#endif
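
/*
 * The *_page_alias routines in this region follow the same pattern, sketched
 * here in pseudo-C (illustrative only; map_page_at() and unmap_page_at() are
 * made-up names standing in for the wdtlb/witlb and idtlb/iitlb instruction
 * pairs, not real kernel helpers):
 *
 *	void __flush_invalidate_dcache_page_alias(unsigned long virt,
 *						  unsigned long phys)
 *	{
 *		map_page_at(virt, phys | PAGE_KERNEL | _PAGE_HW_WRITE);
 *		___flush_invalidate_dcache_page(virt);
 *		unmap_page_at(virt);
 *	}
 *
 * i.e. the page is temporarily mapped at the aliased virtual address the
 * caller wants to operate on, the cache maintenance is done through that
 * address, and the temporary TLB entry is dropped again.
 */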

ENTRY(__tlbtemp_mapping_itlb)

#if (ICACHE_WAY_SIZE > PAGE_SIZE)

ENTRY(__invalidate_icache_page_alias)

	entry	sp, 16

	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
	mov	a4, a2
	witlb	a6, a2
	isync

	___invalidate_icache_page a2 a3

	iitlb	a4
	isync
	retw

ENDPROC(__invalidate_icache_page_alias)

#endif

/* End of special treatment in TLB miss exceptions */

ENTRY(__tlbtemp_mapping_end)

#endif /* CONFIG_MMU */

/*
 * void __invalidate_icache_page(ulong start)
 */

ENTRY(__invalidate_icache_page)

	entry	sp, 16

	___invalidate_icache_page a2 a3
	isync

	retw

ENDPROC(__invalidate_icache_page)

/*
 * void __invalidate_dcache_page(ulong start)
 */

ENTRY(__invalidate_dcache_page)

	entry	sp, 16

	___invalidate_dcache_page a2 a3
	dsync

	retw

ENDPROC(__invalidate_dcache_page)

/*
 * void __flush_invalidate_dcache_page(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page)

	entry	sp, 16

	___flush_invalidate_dcache_page a2 a3

	dsync
	retw

ENDPROC(__flush_invalidate_dcache_page)

/*
 * void __flush_dcache_page(ulong start)
 */

ENTRY(__flush_dcache_page)

	entry	sp, 16

	___flush_dcache_page a2 a3

	dsync
	retw

ENDPROC(__flush_dcache_page)

/*
 * void __invalidate_icache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_icache_range)

	entry	sp, 16

	___invalidate_icache_range a2 a3 a4
	isync

	retw

ENDPROC(__invalidate_icache_range)

/*
 * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_dcache_range)

	entry	sp, 16

	___flush_invalidate_dcache_range a2 a3 a4
	dsync

	retw

ENDPROC(__flush_invalidate_dcache_range)

/*
 * void __flush_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_dcache_range)

	entry	sp, 16

	___flush_dcache_range a2 a3 a4
	dsync

	retw

ENDPROC(__flush_dcache_range)

/*
 * void __invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_dcache_range)

	entry	sp, 16

	___invalidate_dcache_range a2 a3 a4

	retw

ENDPROC(__invalidate_dcache_range)

/*
 * void __invalidate_icache_all(void)
 */

ENTRY(__invalidate_icache_all)

	entry	sp, 16

	___invalidate_icache_all a2 a3
	isync

	retw

ENDPROC(__invalidate_icache_all)

/*
 * void __flush_invalidate_dcache_all(void)
 */

ENTRY(__flush_invalidate_dcache_all)

	entry	sp, 16

	___flush_invalidate_dcache_all a2 a3
	dsync

	retw

ENDPROC(__flush_invalidate_dcache_all)

/*
 * void __invalidate_dcache_all(void)
 */

ENTRY(__invalidate_dcache_all)

	entry	sp, 16

	___invalidate_dcache_all a2 a3
	dsync

	retw

ENDPROC(__invalidate_dcache_all)