xref: /openbmc/linux/lib/iomap.c (revision f42b3800)
/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#include <linux/module.h>

/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or an MMIO access; these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * MMIO mappings are never in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */

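/*
 * Illustrative example (not part of this file; the register offsets are
 * hypothetical): whether "regs" below came from ioport_map() or from
 * pci_iomap() on a memory BAR, the driver code is identical, and the
 * cookie value alone decides between the inb()/outb() and
 * readb()/writeb() families.
 *
 *	void __iomem *regs;	obtained from ioport_map() or pci_iomap()
 *	u32 status;
 *
 *	iowrite32(0x1, regs + 0x10);		hypothetical CTRL register
 *	status = ioread32(regs + 0x14);		hypothetical STATUS register
 */
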
#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL
#define PIO_RESERVED	0x40000UL
#endif

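/*
 * Worked example of the encoding above (illustrative): ioport_map(0x3f8, 8)
 * below returns the cookie (void __iomem *)(0x3f8 + PIO_OFFSET), i.e.
 * 0x103f8.  Any cookie below PIO_RESERVED (0x40000) is therefore a port,
 * while a pointer returned by ioremap() lands well above that range and is
 * treated as MMIO.  Cookies at or below PIO_OFFSET itself are rejected via
 * bad_io_access() below.
 */
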
static void bad_io_access(unsigned long port, const char *access)
{
	static int count = 10;
	if (count) {
		count--;
		printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
		WARN_ON(1);
	}
}

/*
 * Ugly macros are a way of life.
 */
#define IO_COND(addr, is_pio, is_mmio) do {			\
	unsigned long port = (unsigned long __force)addr;	\
	if (port >= PIO_RESERVED) {				\
		is_mmio;					\
	} else if (port > PIO_OFFSET) {				\
		port &= PIO_MASK;				\
		is_pio;						\
	} else							\
		bad_io_access(port, #is_pio );			\
} while (0)

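/*
 * Roughly how the macro behaves for ioread8() below, spelled out
 * (illustrative only, not the literal preprocessor output):
 *
 *	unsigned long port = (unsigned long __force)addr;
 *	if (port >= PIO_RESERVED)
 *		return readb(addr);		MMIO: high addresses
 *	else if (port > PIO_OFFSET)
 *		return inb(port & PIO_MASK);	PIO: strip the offset
 *	else
 *		bad_io_access(port, "return inb(port)");
 */
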
#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif

#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif

unsigned int ioread8(void __iomem *addr)
{
	IO_COND(addr, return inb(port), return readb(addr));
	return 0xff;
}
unsigned int ioread16(void __iomem *addr)
{
	IO_COND(addr, return inw(port), return readw(addr));
	return 0xffff;
}
unsigned int ioread16be(void __iomem *addr)
{
	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
	return 0xffff;
}
unsigned int ioread32(void __iomem *addr)
{
	IO_COND(addr, return inl(port), return readl(addr));
	return 0xffffffff;
}
unsigned int ioread32be(void __iomem *addr)
{
	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
	return 0xffffffff;
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);

#ifndef pio_write16be
#define pio_write16be(val, port) outw(swab16(val), port)
#define pio_write32be(val, port) outl(swab32(val), port)
#endif

#ifndef mmio_write16be
#define mmio_write16be(val, port) __raw_writew(be16_to_cpu(val), port)
#define mmio_write32be(val, port) __raw_writel(be32_to_cpu(val), port)
#endif

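/*
 * Worked example for the big-endian accessors (illustrative): for an MMIO
 * cookie, iowrite32be(0x12345678, addr) below stores the bytes 0x12 0x34
 * 0x56 0x78 in ascending address order, and ioread32be(addr) reads them
 * back as 0x12345678.  On a little-endian host that means a byte swap; on
 * a big-endian host the raw access already has the right layout, so the
 * conversion is a no-op.
 */
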
void iowrite8(u8 val, void __iomem *addr)
{
	IO_COND(addr, outb(val, port), writeb(val, addr));
}
void iowrite16(u16 val, void __iomem *addr)
{
	IO_COND(addr, outw(val, port), writew(val, addr));
}
void iowrite16be(u16 val, void __iomem *addr)
{
	IO_COND(addr, pio_write16be(val, port), mmio_write16be(val, addr));
}
void iowrite32(u32 val, void __iomem *addr)
{
	IO_COND(addr, outl(val, port), writel(val, addr));
}
void iowrite32be(u32 val, void __iomem *addr)
{
	IO_COND(addr, pio_write32be(val, port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);

/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses: we don't convert to CPU byte order,
 * but transfer the data in "IO byte order", and we don't insert
 * any IO barriers either.
 */
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
	while (--count >= 0) {
		u8 data = __raw_readb(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
	while (--count >= 0) {
		u16 data = __raw_readw(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
	while (--count >= 0) {
		u32 data = __raw_readl(addr);
		*dst = data;
		dst++;
	}
}
#endif

#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
	while (--count >= 0) {
		__raw_writeb(*src, addr);
		src++;
	}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
	while (--count >= 0) {
		__raw_writew(*src, addr);
		src++;
	}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
	while (--count >= 0) {
		__raw_writel(*src, addr);
		src++;
	}
}
#endif

void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insb(port, dst, count), mmio_insb(addr, dst, count));
}
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insw(port, dst, count), mmio_insw(addr, dst, count));
}
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insl(port, dst, count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);

void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsl(port, src, count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);

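/*
 * Example use of the repeat accessors above (illustrative; the FIFO
 * register offset and buffer are hypothetical): draining 64 32-bit words
 * from a device FIFO into a driver buffer, with "regs" obtained from
 * pci_iomap() or ioport_map():
 *
 *	u32 buf[64];
 *
 *	ioread32_rep(regs + FIFO_DATA_REG, buf, ARRAY_SIZE(buf));
 *
 * Every read hits the same cookie address; only the destination pointer
 * advances, which is exactly what a hardware FIFO expects.
 */
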
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	if (port > PIO_MASK)
		return NULL;
	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);

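/*
 * Example (illustrative): mapping a legacy I/O port range, e.g. a UART at
 * the conventional base 0x3f8, so it can be driven through the same
 * ioread*()/iowrite*() calls as an MMIO device:
 *
 *	void __iomem *uart = ioport_map(0x3f8, 8);
 *
 *	if (uart)
 *		iowrite8(0x03, uart + 3);	hypothetical register write
 *	ioport_unmap(uart);
 */
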
/**
 * pci_iomap - create a virtual mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @maxlen: length of the memory to map
 *
 * Using this function you will get a __iomem address to your device BAR.
 * You can access it using ioread*() and iowrite*(). These functions hide
 * the details of whether this is an MMIO or PIO address space and do the
 * right thing for either kind of mapping.
 *
 * @maxlen specifies the maximum length to map. If you want to get access to
 * the complete BAR without checking for its length first, pass %0 here.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* What? */
	return NULL;
}

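/*
 * Example (illustrative, error handling trimmed): typical use from a PCI
 * driver's probe routine, mapping all of BAR 0 (maxlen 0 means "the whole
 * BAR") and releasing it again on teardown.  The BAR number is
 * hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	pci_iounmap(pdev, regs);
 *
 * pci_iounmap() below only calls iounmap() for MMIO cookies; port cookies
 * created by ioport_map() need no teardown.
 */
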
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);