#ifndef __ASM_SH_MMU_CONTEXT_32_H
#define __ASM_SH_MMU_CONTEXT_32_H

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Do nothing */
}

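/*
 * On parts with PTEAEX the current ASID lives in its own MMU register
 * (MMU_PTEAEX), so it can be read and written directly. Otherwise the
 * ASID occupies the low 8 bits of MMU_PTEH and has to be updated with
 * a read-modify-write sequence that leaves the remaining PTEH bits
 * untouched.
 */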
#ifdef CONFIG_CPU_HAS_PTEAEX
static inline void set_asid(unsigned long asid)
{
	__raw_writel(asid, MMU_PTEAEX);
}

static inline unsigned long get_asid(void)
{
	return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
}
#else
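/*
 * Write the new ASID into the low 8 bits of MMU_PTEH: load PTEH, clear
 * the ASID field with the 0xffffff00 mask, OR in the new value and
 * store it back.
 */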
static inline void set_asid(unsigned long asid)
{
	unsigned long __dummy;

	__asm__ __volatile__ ("mov.l	%2, %0\n\t"
			      "and	%3, %0\n\t"
			      "or	%1, %0\n\t"
			      "mov.l	%0, %2"
			      : "=&r" (__dummy)
			      : "r" (asid), "m" (__m(MMU_PTEH)),
			        "r" (0xffffff00));
}

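/* Read MMU_PTEH and mask off everything but the ASID field. */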
static inline unsigned long get_asid(void)
{
	unsigned long asid;

	__asm__ __volatile__ ("mov.l	%1, %0"
			      : "=r" (asid)
			      : "m" (__m(MMU_PTEH)));
	asid &= MMU_CONTEXT_ASID_MASK;
	return asid;
}
#endif /* CONFIG_CPU_HAS_PTEAEX */

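/*
 * These helpers are used by the common SH mmu_context code at context
 * switch time: switch_mm() loads the incoming mm's pgd with set_TTB()
 * and the mm's ASID is programmed via set_asid() when its context is
 * activated.
 */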
/* MMU_TTB holds the page table base (pgd), used for optimizing the fault handling. */
static inline void set_TTB(pgd_t *pgd)
{
	__raw_writel((unsigned long)pgd, MMU_TTB);
}

static inline pgd_t *get_TTB(void)
{
	return (pgd_t *)__raw_readl(MMU_TTB);
}
#endif /* __ASM_SH_MMU_CONTEXT_32_H */