xref: /openbmc/linux/arch/arc/include/asm/io.h (revision f5db19e93f680160a0fb3e2b05ceb4832b24d486)
11162b070SVineet Gupta /*
21162b070SVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
31162b070SVineet Gupta  *
41162b070SVineet Gupta  * This program is free software; you can redistribute it and/or modify
51162b070SVineet Gupta  * it under the terms of the GNU General Public License version 2 as
61162b070SVineet Gupta  * published by the Free Software Foundation.
71162b070SVineet Gupta  */
81162b070SVineet Gupta 
91162b070SVineet Gupta #ifndef _ASM_ARC_IO_H
101162b070SVineet Gupta #define _ASM_ARC_IO_H
111162b070SVineet Gupta 
121162b070SVineet Gupta #include <linux/types.h>
131162b070SVineet Gupta #include <asm/byteorder.h>
141162b070SVineet Gupta #include <asm/page.h>
151162b070SVineet Gupta 
/*
 * MMIO mapping primitives, implemented in arch/arc/mm (not visible here):
 *  - ioremap():      map a physical MMIO range into kernel virtual space.
 *  - ioremap_prot(): same, but with caller-supplied page-protection flags.
 *  - iounmap():      tear down a mapping created by the above.
 */
16*f5db19e9SVineet Gupta extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
17*f5db19e9SVineet Gupta extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
184368902bSGilad Ben-Yossef 				  unsigned long flags);
191162b070SVineet Gupta extern void iounmap(const void __iomem *addr);
201162b070SVineet Gupta 
/*
 * ARC does not distinguish the nocache / write-combine / write-through
 * mapping variants: all three alias the plain ioremap() above.
 */
211162b070SVineet Gupta #define ioremap_nocache(phy, sz)	ioremap(phy, sz)
221162b070SVineet Gupta #define ioremap_wc(phy, sz)		ioremap(phy, sz)
231162b070SVineet Gupta #define ioremap_wt(phy, sz)		ioremap(phy, sz)
241162b070SVineet Gupta 
251162b070SVineet Gupta /* Change struct page to physical address */
261162b070SVineet Gupta #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
271162b070SVineet Gupta 
/*
 * Raw 8-bit MMIO read: a single ldb instruction, no ordering barrier.
 *
 * The self-#define advertises this accessor so <asm-generic/io.h>
 * (included at the bottom of this file) does not supply its default.
 * "%U1" lets the compiler print the addressing-mode suffix that matches
 * the "m" operand it chose; the "memory" clobber plus __volatile__ stop
 * the compiler from caching, eliding or reordering the access.
 */
281162b070SVineet Gupta #define __raw_readb __raw_readb
291162b070SVineet Gupta static inline u8 __raw_readb(const volatile void __iomem *addr)
301162b070SVineet Gupta {
311162b070SVineet Gupta 	u8 b;
321162b070SVineet Gupta 
331162b070SVineet Gupta 	__asm__ __volatile__(
341162b070SVineet Gupta 	"	ldb%U1 %0, %1	\n"
351162b070SVineet Gupta 	: "=r" (b)
361162b070SVineet Gupta 	: "m" (*(volatile u8 __force *)addr)
371162b070SVineet Gupta 	: "memory");
381162b070SVineet Gupta 
391162b070SVineet Gupta 	return b;
401162b070SVineet Gupta }
411162b070SVineet Gupta 
/*
 * Raw 16-bit MMIO read: a single ldw instruction, no ordering barrier.
 * Same asm discipline as __raw_readb above: "m" operand with "%U1"
 * suffix, __volatile__ + "memory" clobber to pin the access in place.
 */
421162b070SVineet Gupta #define __raw_readw __raw_readw
431162b070SVineet Gupta static inline u16 __raw_readw(const volatile void __iomem *addr)
441162b070SVineet Gupta {
451162b070SVineet Gupta 	u16 s;
461162b070SVineet Gupta 
471162b070SVineet Gupta 	__asm__ __volatile__(
481162b070SVineet Gupta 	"	ldw%U1 %0, %1	\n"
491162b070SVineet Gupta 	: "=r" (s)
501162b070SVineet Gupta 	: "m" (*(volatile u16 __force *)addr)
511162b070SVineet Gupta 	: "memory");
521162b070SVineet Gupta 
531162b070SVineet Gupta 	return s;
541162b070SVineet Gupta }
551162b070SVineet Gupta 
/*
 * Raw 32-bit MMIO read: a single ld instruction, no ordering barrier.
 * (ARC "ld" without a size suffix is the word-sized load.)
 */
561162b070SVineet Gupta #define __raw_readl __raw_readl
571162b070SVineet Gupta static inline u32 __raw_readl(const volatile void __iomem *addr)
581162b070SVineet Gupta {
591162b070SVineet Gupta 	u32 w;
601162b070SVineet Gupta 
611162b070SVineet Gupta 	__asm__ __volatile__(
621162b070SVineet Gupta 	"	ld%U1 %0, %1	\n"
631162b070SVineet Gupta 	: "=r" (w)
641162b070SVineet Gupta 	: "m" (*(volatile u32 __force *)addr)
651162b070SVineet Gupta 	: "memory");
661162b070SVineet Gupta 
671162b070SVineet Gupta 	return w;
681162b070SVineet Gupta }
691162b070SVineet Gupta 
/*
 * Raw 8-bit MMIO write: a single stb instruction, no ordering barrier.
 * Value operand is "r" (register); destination is the "m" operand so
 * the compiler forms the address, with "%U1" printing its mode suffix.
 */
701162b070SVineet Gupta #define __raw_writeb __raw_writeb
711162b070SVineet Gupta static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
721162b070SVineet Gupta {
731162b070SVineet Gupta 	__asm__ __volatile__(
741162b070SVineet Gupta 	"	stb%U1 %0, %1	\n"
751162b070SVineet Gupta 	:
761162b070SVineet Gupta 	: "r" (b), "m" (*(volatile u8 __force *)addr)
771162b070SVineet Gupta 	: "memory");
781162b070SVineet Gupta }
791162b070SVineet Gupta 
/*
 * Raw 16-bit MMIO write: a single stw instruction, no ordering barrier.
 */
801162b070SVineet Gupta #define __raw_writew __raw_writew
811162b070SVineet Gupta static inline void __raw_writew(u16 s, volatile void __iomem *addr)
821162b070SVineet Gupta {
831162b070SVineet Gupta 	__asm__ __volatile__(
841162b070SVineet Gupta 	"	stw%U1 %0, %1	\n"
851162b070SVineet Gupta 	:
861162b070SVineet Gupta 	: "r" (s), "m" (*(volatile u16 __force *)addr)
871162b070SVineet Gupta 	: "memory");
881162b070SVineet Gupta 
891162b070SVineet Gupta }
901162b070SVineet Gupta 
/*
 * Raw 32-bit MMIO write: a single st instruction, no ordering barrier.
 */
911162b070SVineet Gupta #define __raw_writel __raw_writel
921162b070SVineet Gupta static inline void __raw_writel(u32 w, volatile void __iomem *addr)
931162b070SVineet Gupta {
941162b070SVineet Gupta 	__asm__ __volatile__(
951162b070SVineet Gupta 	"	st%U1 %0, %1	\n"
961162b070SVineet Gupta 	:
971162b070SVineet Gupta 	: "r" (w), "m" (*(volatile u32 __force *)addr)
981162b070SVineet Gupta 	: "memory");
991162b070SVineet Gupta 
1001162b070SVineet Gupta }
1011162b070SVineet Gupta 
/*
 * I/O read/write barriers used by the ordered accessors below.
 * ARCv2 cores need real rmb()/wmb(); on older ISAs (ARCompact) these
 * compile to nothing.
 */
102b8a03302SVineet Gupta #ifdef CONFIG_ISA_ARCV2
103b8a03302SVineet Gupta #include <asm/barrier.h>
104b8a03302SVineet Gupta #define __iormb()		rmb()
105b8a03302SVineet Gupta #define __iowmb()		wmb()
106b8a03302SVineet Gupta #else
107b8a03302SVineet Gupta #define __iormb()		do { } while (0)
108b8a03302SVineet Gupta #define __iowmb()		do { } while (0)
109b8a03302SVineet Gupta #endif
110b8a03302SVineet Gupta 
111b8a03302SVineet Gupta /*
112b8a03302SVineet Gupta  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
113b8a03302SVineet Gupta  * Based on ARM model for the typical use case
114b8a03302SVineet Gupta  *
115b8a03302SVineet Gupta  *	<ST [DMA buffer]>
116b8a03302SVineet Gupta  *	<writel MMIO "go" reg>
117b8a03302SVineet Gupta  *  or:
118b8a03302SVineet Gupta  *	<readl MMIO "status" reg>
119b8a03302SVineet Gupta  *	<LD [DMA buffer]>
120b8a03302SVineet Gupta  *
121b8a03302SVineet Gupta  * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
122b8a03302SVineet Gupta  */
/* Reads: device access first, then read barrier before dependent loads. */
123b8a03302SVineet Gupta #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
124b8a03302SVineet Gupta #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
125b8a03302SVineet Gupta #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
126b8a03302SVineet Gupta 
/* Writes: write barrier first so prior stores are visible, then access. */
127b8a03302SVineet Gupta #define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
128b8a03302SVineet Gupta #define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
129b8a03302SVineet Gupta #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
130b8a03302SVineet Gupta 
131b8a03302SVineet Gupta /*
132f778cc65SLada Trimasova  * Relaxed API for drivers which can handle barrier ordering themselves
133f778cc65SLada Trimasova  *
134f778cc65SLada Trimasova  * Also these are defined to perform little endian accesses.
135f778cc65SLada Trimasova  * To provide the typical device register semantics of fixed endian,
136f778cc65SLada Trimasova  * swap the byte order for Big Endian
137f778cc65SLada Trimasova  *
138f778cc65SLada Trimasova  * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
139b8a03302SVineet Gupta  */
/* Byte access needs no swap; 16/32-bit reads convert LE -> CPU order. */
140b8a03302SVineet Gupta #define readb_relaxed(c)	__raw_readb(c)
141f778cc65SLada Trimasova #define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
142f778cc65SLada Trimasova 					__raw_readw(c)); __r; })
143f778cc65SLada Trimasova #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
144f778cc65SLada Trimasova 					__raw_readl(c)); __r; })
145b8a03302SVineet Gupta 
/* 16/32-bit writes convert CPU -> LE order before hitting the device. */
146b8a03302SVineet Gupta #define writeb_relaxed(v,c)	__raw_writeb(v,c)
147f778cc65SLada Trimasova #define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
148f778cc65SLada Trimasova #define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
1496532b02fSMischa Jonker 
1501162b070SVineet Gupta #include <asm-generic/io.h>
1511162b070SVineet Gupta 
1521162b070SVineet Gupta #endif /* _ASM_ARC_IO_H */
153