/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_IO_H
#define _ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif

extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
				  unsigned long flags);

static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *)port;
}

static inline void ioport_unmap(void __iomem *addr)
{
}

extern void iounmap(const void __iomem *addr);

#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
#define ioremap_wc(phy, sz)		ioremap(phy, sz)
#define ioremap_wt(phy, sz)		ioremap(phy, sz)
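
/*
 * Illustrative sketch (not part of this header's API): a driver would
 * typically map its device's register window once at probe time and tear
 * it down on remove. The base address and size below are hypothetical:
 *
 *	void __iomem *regs = ioremap(0xf0000000, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */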

/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
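
/*
 * Usage sketch (hypothetical device): these accessors suit IP blocks whose
 * registers are wired big-endian regardless of CPU endianness, e.g. reading
 * a status word and acking it back. STAT_REG and ACK_BIT are illustrative
 * names, not defined here:
 *
 *	u32 stat = ioread32be(regs + STAT_REG);
 *	iowrite32be(stat | ACK_BIT, regs + STAT_REG);
 */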
52e5bc0478SVineet Gupta 
531162b070SVineet Gupta /* Change struct page to physical address */
541162b070SVineet Gupta #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
551162b070SVineet Gupta 
561162b070SVineet Gupta #define __raw_readb __raw_readb
571162b070SVineet Gupta static inline u8 __raw_readb(const volatile void __iomem *addr)
581162b070SVineet Gupta {
591162b070SVineet Gupta 	u8 b;
601162b070SVineet Gupta 
611162b070SVineet Gupta 	__asm__ __volatile__(
621162b070SVineet Gupta 	"	ldb%U1 %0, %1	\n"
631162b070SVineet Gupta 	: "=r" (b)
641162b070SVineet Gupta 	: "m" (*(volatile u8 __force *)addr)
651162b070SVineet Gupta 	: "memory");
661162b070SVineet Gupta 
671162b070SVineet Gupta 	return b;
681162b070SVineet Gupta }
691162b070SVineet Gupta 
701162b070SVineet Gupta #define __raw_readw __raw_readw
711162b070SVineet Gupta static inline u16 __raw_readw(const volatile void __iomem *addr)
721162b070SVineet Gupta {
731162b070SVineet Gupta 	u16 s;
741162b070SVineet Gupta 
751162b070SVineet Gupta 	__asm__ __volatile__(
761162b070SVineet Gupta 	"	ldw%U1 %0, %1	\n"
771162b070SVineet Gupta 	: "=r" (s)
781162b070SVineet Gupta 	: "m" (*(volatile u16 __force *)addr)
791162b070SVineet Gupta 	: "memory");
801162b070SVineet Gupta 
811162b070SVineet Gupta 	return s;
821162b070SVineet Gupta }
831162b070SVineet Gupta 
841162b070SVineet Gupta #define __raw_readl __raw_readl
851162b070SVineet Gupta static inline u32 __raw_readl(const volatile void __iomem *addr)
861162b070SVineet Gupta {
871162b070SVineet Gupta 	u32 w;
881162b070SVineet Gupta 
891162b070SVineet Gupta 	__asm__ __volatile__(
901162b070SVineet Gupta 	"	ld%U1 %0, %1	\n"
911162b070SVineet Gupta 	: "=r" (w)
921162b070SVineet Gupta 	: "m" (*(volatile u32 __force *)addr)
931162b070SVineet Gupta 	: "memory");
941162b070SVineet Gupta 
951162b070SVineet Gupta 	return w;
961162b070SVineet Gupta }
971162b070SVineet Gupta 
98*10d44343SJose Abreu /*
99*10d44343SJose Abreu  * {read,write}s{b,w,l}() repeatedly access the same IO address in
100*10d44343SJose Abreu  * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
101*10d44343SJose Abreu  * @count times
102*10d44343SJose Abreu  */
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr,	\
				  void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			u##t x = __raw_read##f(addr);			\
			*buf++ = x;					\
		} while (--count);					\
	} else {							\
		do {							\
			u##t x = __raw_read##f(addr);			\
			put_unaligned(x, buf++);			\
		} while (--count);					\
	}								\
}

#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)
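
/*
 * Example (illustrative, names are hypothetical): draining a 16-bit FIFO
 * into a buffer that may not be naturally aligned, e.g. one pointing into
 * the middle of a network packet. The alignment check above makes this
 * safe on ARC cores without unaligned-access support:
 *
 *	__raw_readsw(fifo_reg, skb->data + offset, nwords);
 */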

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stb%U1 %0, %1	\n"
	:
	: "r" (b), "m" (*(volatile u8 __force *)addr)
	: "memory");
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stw%U1 %0, %1	\n"
	:
	: "r" (s), "m" (*(volatile u16 __force *)addr)
	: "memory");
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	st%U1 %0, %1	\n"
	:
	: "r" (w), "m" (*(volatile u32 __force *)addr)
	: "memory");
}

#define __raw_writesx(t,f)						\
static inline void __raw_writes##f(volatile void __iomem *addr,	\
				   const void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	const u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			__raw_write##f(*buf++, addr);			\
		} while (--count);					\
	} else {							\
		do {							\
			__raw_write##f(get_unaligned(buf++), addr);	\
		} while (--count);					\
	}								\
}

#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)
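
/*
 * Example (illustrative, names are hypothetical): pushing a block of
 * 32-bit words from a possibly unaligned source buffer into a device
 * FIFO, one word per iteration at the same register address:
 *
 *	__raw_writesl(fifo_reg, tx_buf, len / 4);
 */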

/*
 * MMIO can also get buffered/optimized in the micro-architecture, so
 * barriers are needed. Based on the ARM model for the typical use cases:
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 *  or:
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
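
/*
 * Concrete instance of the pattern described above (all names are
 * hypothetical): fill a DMA descriptor in memory, then ring the device
 * doorbell. The __iowmb() implied by writel() guarantees the descriptor
 * stores are visible to the device before the "go" write reaches it:
 *
 *	desc->addr = dma_handle;
 *	desc->len  = len;
 *	writel(DMA_GO, regs + DOORBELL_REG);
 */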

/*
 * Relaxed API for drivers which can handle barrier ordering themselves
 *
 * These are also defined to perform little endian accesses: to provide
 * the typical fixed-endian device register semantics, the byte order is
 * swapped on Big Endian builds.
 *
 * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
 */
#define readb_relaxed(c)	__raw_readb(c)
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
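
/*
 * Usage sketch (hypothetical register names): a driver programming several
 * configuration registers can issue relaxed writes back to back and let a
 * single ordered writel() at the end pay the barrier cost once:
 *
 *	writel_relaxed(cfg0, regs + CFG0_REG);
 *	writel_relaxed(cfg1, regs + CFG1_REG);
 *	writel(START, regs + CTRL_REG);
 */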

#include <asm-generic/io.h>

#endif /* _ASM_ARC_IO_H */