/*
 * linux/include/asm-m68k/raw_io.h
 *
 * 10/20/00 RZ: - created from bits of io.h and ide.h to clean up the namespace
 *
 */

#ifndef _RAW_IO_H
#define _RAW_IO_H

#ifdef __KERNEL__

#include <asm/types.h>


/* Values for nocacheflag and cmode */
#define IOMAP_FULL_CACHING		0
#define IOMAP_NOCACHE_SER		1
#define IOMAP_NOCACHE_NONSER		2
#define IOMAP_WRITETHROUGH		3

extern void iounmap(void __iomem *addr);

extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
		       int cacheflag);
extern void __iounmap(void *addr, unsigned long size);

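/*
 * A minimal usage sketch of the low-level interface declared above (not
 * part of this header; the physical address and size are hypothetical):
 * a driver maps its register block uncached before touching it with the
 * accessors below, and unmaps it again when done.
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0xfff00000, 0x1000, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */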

/* ++roman: The assignments to temporary variables keep gcc from sometimes
 * generating two accesses to memory, which may be undesirable for some
 * devices.
 */
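/*
 * A hedged usage sketch of the accessors defined below, assuming a
 * register block mapped as in the example above (the offsets and the
 * value are made up):
 *
 *	u8  status = in_8(regs);		// one 8-bit read
 *	u16 count  = in_le16(regs + 4);		// little-endian 16-bit read
 *	out_be32(regs + 8, 0x12345678);		// big-endian 32-bit write
 *
 * Each macro performs a single volatile access of the given width; the
 * in_ variants read into a local so the device register is not re-read.
 */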
#define in_8(addr) \
    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
#define in_be16(addr) \
    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
#define in_be32(addr) \
    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
#define in_le16(addr) \
    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
#define in_le32(addr) \
    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })

#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))

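/*
 * The raw_ and __raw_ names below map straight onto the accessors above
 * (the output forms merely take their value and address arguments in the
 * opposite order).  The 16- and 32-bit variants are big-endian, i.e.
 * native m68k byte order; no byte swapping is done.
 */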
#define raw_inb in_8
#define raw_inw in_be16
#define raw_inl in_be32
#define __raw_readb in_8
#define __raw_readw in_be16
#define __raw_readl in_be32

#define raw_outb(val,port) out_8((port),(val))
#define raw_outw(val,port) out_be16((port),(val))
#define raw_outl(val,port) out_be32((port),(val))
#define __raw_writeb(val,addr) out_8((addr),(val))
#define __raw_writew(val,addr) out_be16((addr),(val))
#define __raw_writel(val,addr) out_be32((addr),(val))

static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		*buf++ = in_8(port);
}

static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
			     unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		out_8(port, *buf++);
}

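/*
 * String transfers.  raw_insw/raw_outsw and raw_insl/raw_outsl below
 * first move the nr % 16 leftover words (or longs) one at a time, then
 * handle the remaining nr / 16 groups with a 16-fold unrolled loop.
 * dbra counts its register down to -1, so each loop counter is loaded
 * with the iteration count minus one.
 *
 * A hedged usage sketch (the names and count are made up): reading a
 * 512-byte block from a 16-bit data register could look like
 *
 *	u16 data[256];
 *
 *	raw_insw(data_port, data, 256);
 *
 * where data_port is a volatile u16 __iomem pointer into the mapped
 * register block.
 */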
static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"movew %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}

static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movew %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"movew %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}

static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %2@,%0@+; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"movel %2@,%0@+; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}

static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
			     unsigned int nr)
{
	unsigned int tmp;

	if (nr & 15) {
		tmp = (nr & 15) - 1;
		asm volatile (
			"1: movel %0@+,%2@; dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
	if (nr >> 4) {
		tmp = (nr >> 4) - 1;
		asm volatile (
			"1: "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"movel %0@+,%2@; "
			"dbra %1,1b"
			: "=a" (buf), "=d" (tmp)
			: "a" (port), "0" (buf),
			  "1" (tmp));
	}
}


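/*
 * Byte-swapping variants: every 16-bit word is rotated by 8 bits
 * (rolw #8) on the way through, i.e. its two bytes are swapped relative
 * to the plain raw_insw/raw_outsw above, presumably for hardware whose
 * 16-bit data path is wired byte-swapped.  If nr is not a multiple of 8,
 * a simple one-word-per-iteration loop handles the whole transfer;
 * otherwise an 8-fold unrolled loop is used.
 */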
static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
				  unsigned int nr)
{
    if ((nr) % 8)
	__asm__ __volatile__
	       ("\tmovel %0,%/a0\n\t"
		"movel %1,%/a1\n\t"
		"movel %2,%/d6\n\t"
		"subql #1,%/d6\n"
		"1:\tmovew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"dbra %/d6,1b"
		:
		: "g" (port), "g" (buf), "g" (nr)
		: "d0", "a0", "a1", "d6");
    else
	__asm__ __volatile__
	       ("movel %0,%/a0\n\t"
		"movel %1,%/a1\n\t"
		"movel %2,%/d6\n\t"
		"lsrl  #3,%/d6\n\t"
		"subql #1,%/d6\n"
		"1:\tmovew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"movew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"movew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"movew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"movew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"movew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"movew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"movew %/a0@,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a1@+\n\t"
		"dbra %/d6,1b"
		:
		: "g" (port), "g" (buf), "g" (nr)
		: "d0", "a0", "a1", "d6");
}

static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
				   unsigned int nr)
{
    if ((nr) % 8)
	__asm__ __volatile__
	       ("movel %0,%/a0\n\t"
		"movel %1,%/a1\n\t"
		"movel %2,%/d6\n\t"
		"subql #1,%/d6\n"
		"1:\tmovew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"dbra %/d6,1b"
		:
		: "g" (port), "g" (buf), "g" (nr)
		: "d0", "a0", "a1", "d6");
    else
	__asm__ __volatile__
	       ("movel %0,%/a0\n\t"
		"movel %1,%/a1\n\t"
		"movel %2,%/d6\n\t"
		"lsrl  #3,%/d6\n\t"
		"subql #1,%/d6\n"
		"1:\tmovew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"movew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"movew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"movew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"movew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"movew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"movew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"movew %/a1@+,%/d0\n\t"
		"rolw  #8,%/d0\n\t"
		"movew %/d0,%/a0@\n\t"
		"dbra %/d6,1b"
		:
		: "g" (port), "g" (buf), "g" (nr)
		: "d0", "a0", "a1", "d6");
}

#endif /* __KERNEL__ */

#endif /* _RAW_IO_H */