/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PAGE_MM_H
#define _M68K_PAGE_MM_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/module.h>

/*
 * We don't need to check for alignment etc.
 */
#ifdef CPU_M68040_OR_M68060_ONLY
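/*
 * On 68040/68060, move16 transfers one 16-byte line per instruction and
 * requires 16-byte-aligned operands, which page-aligned source and
 * destination trivially satisfy.  Two move16s per iteration copy 32 bytes,
 * hence the dbra count of PAGE_SIZE / 32 - 1.
 */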
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra  %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1));
}

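/*
 * Zero the first 16-byte line by hand, then let move16 stream that
 * already-zeroed line into the rest of the page.  Each iteration advances
 * the source pointer by 16 bytes and immediately backs it up again (two
 * subqw #8), so the source always re-reads the zeroed first line.
 */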
static inline void clear_page(void *page)
{
	unsigned long tmp;
	unsigned long *sp = page;

	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw  #8,%2\n\t"
			     "subqw  #8,%2\n\t"
			     "dbra   %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}

#else
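/* Kernels not built exclusively for 68040/68060 fall back to memset/memcpy. */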
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
#endif

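/*
 * The user-page helpers additionally flush the data cache for the page,
 * keeping the kernel mapping coherent with the user-space mapping at vaddr.
 */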
#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)

extern unsigned long m68k_memoffset;

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL

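/*
 * Virtual<->physical translation is a constant offset (m68k_memoffset).
 * The addl/subl instructions below start out with a #0 immediate; the
 * m68k_fixup() annotation records the location of that immediate (1b+2)
 * so startup code can patch in the real offset once it is known.
 */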
static inline unsigned long ___pa(void *vaddr)
{
	unsigned long paddr;
	asm (
		"1:	addl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (paddr)
		: "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
#define __pa(vaddr)	___pa((void *)(long)(vaddr))
static inline void *__va(unsigned long paddr)
{
	void *vaddr;
	asm (
		"1:	subl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (vaddr)
		: "0" (paddr), "i" (m68k_fixup_memoffset));
	return vaddr;
}

#else	/* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
#define __pa(x) ___pa((unsigned long)(x))
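/*
 * Sun3 uses a fixed mapping: virtual addresses at or above PAGE_OFFSET
 * translate by subtracting PAGE_OFFSET; anything else is translated by a
 * 0x2000000 offset.  __va() performs the mirror-image conversion.
 */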
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return x - PAGE_OFFSET;
	else
		return x + 0x2000000;
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
#endif	/* CONFIG_SUN3 */

/*
 * NOTE: "virtual" isn't really correct here; it should actually be the
 * offset into the memory node, but since we have no highmem this works
 * for now.
 * TODO: implement (fast) pfn <-> pgdat_idx conversion functions; they
 * would make many of the shifts unnecessary.
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)

extern int m68k_virt_to_node_shift;

#define virt_to_page(addr) ({						\
	pfn_to_page(virt_to_pfn(addr));					\
})
#define page_to_virt(page) ({						\
	pfn_to_virt(page_to_pfn(page));					\
})

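/* Page frame number of the start of the first memory chunk. */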
#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)

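/*
 * A kernel virtual address is valid if it lies inside the directly mapped
 * region [PAGE_OFFSET, high_memory); pfn_valid() reuses the same check via
 * the pfn's linear virtual address.
 */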
#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */

#endif /* _M68K_PAGE_MM_H */