/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_IO_H
#define _ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>

extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
				  unsigned long flags);
extern void iounmap(const void __iomem *addr);

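/*
 * ioremap() on ARC gives an uncached mapping, so the nocache, write-combine
 * and write-through variants simply alias it.
 */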
#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
#define ioremap_wc(phy, sz)		ioremap(phy, sz)
#define ioremap_wt(phy, sz)		ioremap(phy, sz)

/* Convert a struct page to its physical address */
#define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)

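/*
 * Raw MMIO accessors: a single load/store of the given width, with no
 * ordering or barrier semantics implied. These back the *_relaxed() and
 * plain readX()/writeX() helpers defined further down.
 */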
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 b;

	__asm__ __volatile__(
	"	ldb%U1 %0, %1	\n"
	: "=r" (b)
	: "m" (*(volatile u8 __force *)addr)
	: "memory");

	return b;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 s;

	__asm__ __volatile__(
	"	ldw%U1 %0, %1	\n"
	: "=r" (s)
	: "m" (*(volatile u16 __force *)addr)
	: "memory");

	return s;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 w;

	__asm__ __volatile__(
	"	ld%U1 %0, %1	\n"
	: "=r" (w)
	: "m" (*(volatile u32 __force *)addr)
	: "memory");

	return w;
}

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stb%U1 %0, %1	\n"
	:
	: "r" (b), "m" (*(volatile u8 __force *)addr)
	: "memory");
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stw%U1 %0, %1	\n"
	:
	: "r" (s), "m" (*(volatile u16 __force *)addr)
	: "memory");
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	st%U1 %0, %1	\n"
	:
	: "r" (w), "m" (*(volatile u32 __force *)addr)
	: "memory");
}

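/*
 * I/O barriers: real rmb()/wmb() on ARCv2; on the older ISA the defines
 * below compile away to no-ops.
 */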
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif

/*
 * MMIO accesses can also get buffered/reordered in the micro-architecture,
 * so barriers are needed. Modeled on the ARM approach for the typical use
 * cases:
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 *  or:
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })

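/*
 * Illustrative sketch only (not part of the ARC port): a hypothetical driver
 * kicking off a descriptor-based transfer with the ordered accessors above.
 * foo_start_xfer() and its register layout are made-up names.
 */
static inline void foo_start_xfer(void __iomem *regs, u32 desc_paddr)
{
	/* hypothetical register layout: 0x0 = descriptor paddr, 0x4 = "go" */
	writel(desc_paddr, regs + 0x0);

	/*
	 * writel() does __iowmb() before the store, so the descriptor write
	 * above (and any earlier CPU stores to the DMA buffer) are ordered
	 * before the device sees the "go" bit.
	 */
	writel(1, regs + 0x4);
}
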
/*
 * Relaxed API for drivers which can handle any ordering themselves
 */
#define readb_relaxed(c)	__raw_readb(c)
#define readw_relaxed(c)	__raw_readw(c)
#define readl_relaxed(c)	__raw_readl(c)

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew(v,c)
#define writel_relaxed(v,c)	__raw_writel(v,c)

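/*
 * Illustrative sketch only: polling a hypothetical "done" bit where the
 * relaxed accessor is sufficient, followed by one ordered readl() before the
 * CPU looks at the DMA buffer. foo_wait_done() and the 0x8 offset are
 * made-up names.
 */
static inline void foo_wait_done(void __iomem *regs)
{
	u32 status;

	/* no ordering against normal memory is needed while just polling */
	do {
		status = readl_relaxed(regs + 0x8);
	} while (!(status & 0x1));

	/*
	 * readl() adds __iormb() after the access, ordering this MMIO read
	 * before any subsequent loads from the DMA buffer.
	 */
	(void)readl(regs + 0x8);
}
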
#include <asm-generic/io.h>

#endif /* _ASM_ARC_IO_H */