/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/page.h>      /* IO address mapping routines need this */
#include <asm/asi.h>
#include <asm-generic/pci_iomap.h>

/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;

/* __raw_{read,write}{b,w,l,q} use direct access.
 * They access memory as big-endian and bypass the cache
 * by using ASI_PHYS_BYPASS_EC_E.
 */
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 w, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 l, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
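
/*
 * Example sketch: the __raw_ accessors read and write device registers
 * big-endian and, unlike readl()/writel() below, their inline asm carries
 * no "memory" clobber, so the compiler is free to reorder them against
 * surrounding accesses.  `regs' and the MY_CFG names are hypothetical:
 *
 *	u32 cfg = __raw_readl(regs + MY_CFG);
 *
 *	__raw_writel(cfg | MY_CFG_ENABLE, regs + MY_CFG);
 */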

/* Memory functions, same as I/O accesses on Ultra.
 * They access memory as little-endian and bypass the cache
 * by using ASI_PHYS_BYPASS_EC_E_L.
 */
#define readb readb
#define readb_relaxed readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define readw readw
#define readw_relaxed readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define readl readl
#define readl_relaxed readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define readq readq
#define readq_relaxed readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define writeb writeb
#define writeb_relaxed writeb
static inline void writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define writew writew
#define writew_relaxed writew
static inline void writew(u16 w, volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define writel writel
#define writel_relaxed writel
static inline void writel(u32 l, volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define writeq writeq
#define writeq_relaxed writeq
static inline void writeq(u64 q, volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}
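
/*
 * Example sketch: readl()/writel() access little-endian (PCI-style) device
 * registers through an __iomem cookie, and their "memory" clobber keeps the
 * compiler from reordering them against surrounding code.  `regs' and the
 * MY_STATUS/MY_IRQ_ACK offsets are hypothetical:
 *
 *	u32 status = readl(regs + MY_STATUS);
 *
 *	if (status & MY_STATUS_IRQ)
 *		writel(status, regs + MY_IRQ_ACK);
 */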

#define inb inb
static inline u8 inb(unsigned long addr)
{
	return readb((volatile void __iomem *)addr);
}

#define inw inw
static inline u16 inw(unsigned long addr)
{
	return readw((volatile void __iomem *)addr);
}

#define inl inl
static inline u32 inl(unsigned long addr)
{
	return readl((volatile void __iomem *)addr);
}

#define outb outb
static inline void outb(u8 b, unsigned long addr)
{
	writeb(b, (volatile void __iomem *)addr);
}

#define outw outw
static inline void outw(u16 w, unsigned long addr)
{
	writew(w, (volatile void __iomem *)addr);
}

#define outl outl
static inline void outl(u32 l, unsigned long addr)
{
	writel(l, (volatile void __iomem *)addr);
}
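
/*
 * Example sketch: the inb()/outb() family above simply casts the port
 * number to an __iomem address and forwards to readb()/writeb(), so on
 * sparc64 a "port" is just another physical MMIO address.  `io_base' and
 * UART_DATA are hypothetical:
 *
 *	u8 c = inb(io_base + UART_DATA);
 *
 *	outb(c, io_base + UART_DATA);
 */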

#define inb_p(__addr)		inb(__addr)
#define outb_p(__b, __addr)	outb(__b, __addr)
#define inw_p(__addr)		inw(__addr)
#define outw_p(__w, __addr)	outw(__w, __addr)
#define inl_p(__addr)		inl(__addr)
#define outl_p(__l, __addr)	outl(__l, __addr)

/* String versions of the port accessors, operating on whole buffers. */
void outsb(unsigned long, const void *, unsigned long);
void outsw(unsigned long, const void *, unsigned long);
void outsl(unsigned long, const void *, unsigned long);
void insb(unsigned long, void *, unsigned long);
void insw(unsigned long, void *, unsigned long);
void insl(unsigned long, void *, unsigned long);

static inline void readsb(void __iomem *port, void *buf, unsigned long count)
{
	insb((unsigned long __force)port, buf, count);
}

static inline void readsw(void __iomem *port, void *buf, unsigned long count)
{
	insw((unsigned long __force)port, buf, count);
}

static inline void readsl(void __iomem *port, void *buf, unsigned long count)
{
	insl((unsigned long __force)port, buf, count);
}

static inline void writesb(void __iomem *port, const void *buf, unsigned long count)
{
	outsb((unsigned long __force)port, buf, count);
}

static inline void writesw(void __iomem *port, const void *buf, unsigned long count)
{
	outsw((unsigned long __force)port, buf, count);
}

static inline void writesl(void __iomem *port, const void *buf, unsigned long count)
{
	outsl((unsigned long __force)port, buf, count);
}

#define ioread8_rep(p,d,l)	readsb(p,d,l)
#define ioread16_rep(p,d,l)	readsw(p,d,l)
#define ioread32_rep(p,d,l)	readsl(p,d,l)
#define iowrite8_rep(p,d,l)	writesb(p,d,l)
#define iowrite16_rep(p,d,l)	writesw(p,d,l)
#define iowrite32_rep(p,d,l)	writesl(p,d,l)
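
/*
 * Example sketch: the _rep variants move a whole buffer to or from a
 * single, non-incrementing register location, e.g. draining a data FIFO.
 * `regs', MY_FIFO_REG and the buffer size are hypothetical:
 *
 *	u32 buf[16];
 *
 *	ioread32_rep(regs + MY_FIFO_REG, buf, ARRAY_SIZE(buf));
 */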

/* Valid I/O space regions can be anywhere, because each supported PCI bus
 * may live in an arbitrary area of the physical address range.
 */
#define IO_SPACE_LIMIT 0xffffffffffffffffUL

/* Now the SBUS variants.  The only difference from PCI is that we do
 * not use the little-endian ASIs.
 */
static inline u8 sbus_readb(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}

static inline u16 sbus_readw(const volatile void __iomem *addr)
{
	return __raw_readw(addr);
}

static inline u32 sbus_readl(const volatile void __iomem *addr)
{
	return __raw_readl(addr);
}

static inline u64 sbus_readq(const volatile void __iomem *addr)
{
	return __raw_readq(addr);
}

static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
}

static inline void sbus_writew(u16 w, volatile void __iomem *addr)
{
	__raw_writew(w, addr);
}

static inline void sbus_writel(u32 l, volatile void __iomem *addr)
{
	__raw_writel(l, addr);
}

static inline void sbus_writeq(u64 q, volatile void __iomem *addr)
{
	__raw_writeq(q, addr);
}
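
/*
 * Example sketch: an SBUS device register is read big-endian via
 * sbus_readl(), whereas reading the same physical address through readl()
 * would go through the little-endian ASI and return a byte-swapped value.
 * `sbus_regs' and MY_SBUS_CSR are hypothetical:
 *
 *	u32 csr = sbus_readl(sbus_regs + MY_SBUS_CSR);
 *
 *	sbus_writel(csr | MY_SBUS_CSR_RESET, sbus_regs + MY_SBUS_CSR);
 */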

static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
	while (n--) {
		sbus_writeb(c, dst);
		dst++;
	}
}

static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
	volatile void __iomem *d = dst;

	while (n--) {
		writeb(c, d);
		d++;
	}
}

static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
				      __kernel_size_t n)
{
	char *d = dst;

	while (n--) {
		char tmp = sbus_readb(src);
		*d++ = tmp;
		src++;
	}
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
				 __kernel_size_t n)
{
	char *d = dst;

	while (n--) {
		char tmp = readb(src);
		*d++ = tmp;
		src++;
	}
}

static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
				    __kernel_size_t n)
{
	const char *s = src;
	volatile void __iomem *d = dst;

	while (n--) {
		char tmp = *s++;
		sbus_writeb(tmp, d);
		d++;
	}
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
			       __kernel_size_t n)
{
	const char *s = src;
	volatile void __iomem *d = dst;

	while (n--) {
		char tmp = *s++;
		writeb(tmp, d);
		d++;
	}
}
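
/*
 * Example sketch: memcpy_fromio()/memcpy_toio() copy byte-by-byte between
 * ordinary kernel memory and I/O memory, so they are safe for device SRAM
 * or mailbox areas with no alignment guarantees.  `shmem' and the message
 * struct are hypothetical:
 *
 *	struct my_msg msg;
 *
 *	memcpy_fromio(&msg, shmem, sizeof(msg));
 *	msg.seq++;
 *	memcpy_toio(shmem, &msg, sizeof(msg));
 */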

#define mmiowb()

#ifdef __KERNEL__

/* On sparc64 we have the whole physical IO address space accessible
 * using physically addressed loads and stores, so this does nothing.
 */
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	return (void __iomem *)offset;
}

#define ioremap_nocache(X,Y)		ioremap((X),(Y))
#define ioremap_wc(X,Y)			ioremap((X),(Y))
#define ioremap_wt(X,Y)			ioremap((X),(Y))

static inline void iounmap(volatile void __iomem *addr)
{
}
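
/*
 * Example sketch: drivers still follow the usual ioremap()/iounmap()
 * pattern even though ioremap() is just an identity mapping here and
 * iounmap() does nothing.  `res' names a hypothetical struct resource,
 * and MY_ID_REG is a made-up register offset:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (regs) {
 *		u32 id = readl(regs + MY_ID_REG);
 *		iounmap(regs);
 *	}
 */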

#define ioread8			readb
#define ioread16		readw
#define ioread16be		__raw_readw
#define ioread32		readl
#define ioread32be		__raw_readl
#define iowrite8		writeb
#define iowrite16		writew
#define iowrite16be		__raw_writew
#define iowrite32		writel
#define iowrite32be		__raw_writel
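
/*
 * Example sketch: ioread32() goes through the little-endian accessor,
 * while ioread32be() maps to the big-endian __raw_ variant, so reading the
 * same register both ways yields values that differ by a byte swap
 * (swab32()).  `regs' and MY_BE_REG are hypothetical:
 *
 *	u32 le_view = ioread32(regs + MY_BE_REG);
 *	u32 be_view = ioread32be(regs + MY_BE_REG);
 */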

/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
void ioport_unmap(void __iomem *);
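
/*
 * Example sketch: ioport_map() turns a port range into an __iomem cookie
 * usable with the ioreadX()/iowriteX() helpers above.  The port base,
 * length and offset below are hypothetical:
 *
 *	void __iomem *p = ioport_map(0x3f8, 8);
 *
 *	if (p) {
 *		u8 lsr = ioread8(p + 5);
 *		ioport_unmap(p);
 *	}
 */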

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
void pci_iounmap(struct pci_dev *dev, void __iomem *);
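
/*
 * Example sketch: pci_iomap() (declared in <asm-generic/pci_iomap.h>,
 * included above) pairs with pci_iounmap() to map a BAR.  `pdev', the BAR
 * number and MY_VERSION_REG are hypothetical:
 *
 *	void __iomem *bar0 = pci_iomap(pdev, 0, 0);
 *
 *	if (bar0) {
 *		u32 ver = ioread32(bar0 + MY_VERSION_REG);
 *		pci_iounmap(pdev, bar0);
 *	}
 */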

static inline int sbus_can_dma_64bit(void)
{
	return 1;
}

static inline int sbus_can_burst64(void)
{
	return 1;
}

struct device;
void sbus_set_sbus64(struct device *, int);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* !(__SPARC64_IO_H) */