xref: /openbmc/linux/arch/mips/lib/uncached.c (revision eb3fcf00)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 2005 Thiemo Seufer
7  * Copyright (C) 2005  MIPS Technologies, Inc.	All rights reserved.
8  *	Author: Maciej W. Rozycki <macro@mips.com>
9  */
10 
11 
12 #include <asm/addrspace.h>
13 #include <asm/bug.h>
14 #include <asm/cacheflush.h>
15 
/*
 * Compatibility fallbacks for platforms whose <asm/addrspace.h> does not
 * provide these names.  CKSSEG starts where CKSEG1 ends, so it serves as
 * the same exclusive upper bound that CKSEG2 would mark in the range
 * checks below.  TO_PHYS_MASK is not referenced directly in this file;
 * presumably it is consumed by the XKPHYS_TO_PHYS() macro — an all-ones
 * value (-1) makes that masking a no-op.  (NOTE: review assumption —
 * confirm against <asm/addrspace.h> on the affected platforms.)
 */
16 #ifndef CKSEG2
17 #define CKSEG2 CKSSEG
18 #endif
19 #ifndef TO_PHYS_MASK
20 #define TO_PHYS_MASK -1
21 #endif
22 
23 /*
24  * FUNC is executed in one of the uncached segments, depending on its
25  * original address as follows:
26  *
27  * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
28  *    segment used is CKSEG1.
29  * 2. If the original address is in XKPHYS, then the uncached segment
30  *    used is XKPHYS(2).
31  * 3. Otherwise it's a bug.
32  *
33  * The same remapping is done with the stack pointer.  Stack handling
34  * works because we don't handle stack arguments or more complex return
35  * values, so we can avoid sharing the same stack area between a cached
36  * and the uncached mode.
37  */
38 unsigned long run_uncached(void *func)
39 {
	/*
	 * Explicit register variables: "sp" reads the live stack pointer
	 * ($sp), and "ret" is bound to $2 (v0), the register in which the
	 * MIPS ABI returns the callee's result.
	 */
40 	register long sp __asm__("$sp");
41 	register long ret __asm__("$2");
42 	long lfunc = (long)func, ufunc;
43 	long usp;
44 
	/*
	 * Compute the uncached alias of the current stack pointer:
	 * CKSEG0/CKSEG1 addresses map to CKSEG1, XKPHYS addresses are
	 * rebuilt with the uncached cache-coherency attribute (see the
	 * block comment above the function).
	 */
45 	if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
46 		usp = CKSEG1ADDR(sp);
47 #ifdef CONFIG_64BIT
48 	else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
49 		 (long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
50 		usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
51 				     XKPHYS_TO_PHYS((long long)sp));
52 #endif
53 	else {
		/* Not in a remappable segment -- no uncached alias exists.  */
54 		BUG();
55 		usp = sp;
56 	}
	/* Remap the function's own address in exactly the same way.  */
57 	if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
58 		ufunc = CKSEG1ADDR(lfunc);
59 #ifdef CONFIG_64BIT
60 	else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
61 		 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
62 		ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
63 				       XKPHYS_TO_PHYS((long long)lfunc));
64 #endif
65 	else {
		/* Not in a remappable segment -- no uncached alias exists.  */
66 		BUG();
67 		ufunc = lfunc;
68 	}
69 
	/*
	 * Stash the cached $sp in $16 (s0, callee-saved, so func must
	 * preserve it), switch $sp to the uncached alias, call the
	 * uncached alias of func, then restore the original stack
	 * pointer.  $16 and $31 (ra, clobbered by jalr) are declared in
	 * the clobber list; the callee's result lands in $2 and is
	 * picked up through the "ret" register variable.
	 */
70 	__asm__ __volatile__ (
71 		"	move	$16, $sp\n"
72 		"	move	$sp, %1\n"
73 		"	jalr	%2\n"
74 		"	move	$sp, $16"
75 		: "=r" (ret)
76 		: "r" (usp), "r" (ufunc)
77 		: "$16", "$31");
78 
79 	return ret;
80 }
81