/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
 *   which was based on arch/arm/include/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_IO_H
#define _ASM_RISCV_IO_H

#include <linux/types.h>
#include <linux/pgtable.h>
#include <asm/mmiowb.h>
#include <asm/early_ioremap.h>

/*
 * MMIO access functions are separated out to break dependency cycles
 * when using {read,write}* fns in low-level headers
 */
#include <asm/mmio.h>
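
/*
 * Illustrative sketch only (not part of this header), using a hypothetical
 * device: once its registers are mapped with ioremap(), the readX()/writeX()
 * helpers pulled in above are the usual way to touch them.
 *
 *	void __iomem *regs = ioremap(dev_phys_base, dev_size);
 *
 *	writel(0x1, regs + CTRL_REG);		// CTRL_REG is hypothetical
 *	status = readl(regs + STATUS_REG);	// as is STATUS_REG
 */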

/*
 *  I/O port access constants.
 */
#ifdef CONFIG_MMU
#define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
#define PCI_IOBASE		((void __iomem *)PCI_IO_START)
#endif /* CONFIG_MMU */
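
/*
 * Note (illustrative, not from this file): on RISC-V the legacy port space
 * is emulated on top of MMIO.  With the constants above, a port access such
 * as inb(0x60) (port number hypothetical) becomes a byte-sized MMIO load
 * from PCI_IOBASE + 0x60, i.e. from inside the window of PCI_IO_SIZE bytes
 * that the host bridge maps at PCI_IO_START for I/O space.
 */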

/*
 * Emulation routines for the port-mapped IO space used by some PCI drivers.
 * These are defined as being "fully synchronous", but also "not guaranteed to
 * be fully ordered with respect to other memory and I/O operations".  We're
 * going to be on the safe side here and just make them:
 *  - Fully ordered WRT each other, by bracketing them with two fences.  The
 *    outer set contains both I/O so inX is ordered with outX, while the inner just
 *    needs the type of the access (I for inX and O for outX).
 *  - Ordered in the same manner as readX/writeX WRT memory by subsuming their
 *    fences.
 *  - Ordered WRT timer reads, so udelay and friends don't get elided by the
 *    implementation.
 * Note that there is no way to actually enforce that outX is a non-posted
 * operation on RISC-V, but hopefully the timer ordering constraint is
 * sufficient to ensure this works sanely on controllers that support I/O
 * writes.
 */
#define __io_pbr()	__asm__ __volatile__ ("fence io,i"  : : : "memory");
#define __io_par(v)	__asm__ __volatile__ ("fence i,ior" : : : "memory");
#define __io_pbw()	__asm__ __volatile__ ("fence iow,o" : : : "memory");
#define __io_paw()	__asm__ __volatile__ ("fence o,io"  : : : "memory");
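
/*
 * Illustrative expansion, details elided: asm-generic/io.h wraps each port
 * access in the fences above, so an inl() behaves roughly like
 *
 *	__io_pbr();				// fence io,i
 *	val = __raw_readl(PCI_IOBASE + addr);
 *	__io_par(val);				// fence i,ior
 *
 * and outl() is bracketed by __io_pbw()/__io_paw() in the same way.
 */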

/*
 * Accesses from a single hart to a single I/O address must be ordered.  This
 * allows us to use the raw read macros, but we still need to fence before and
 * after the block to ensure ordering WRT other macros.  These are defined to
 * perform host-endian accesses so we use __raw instead of __cpu.
 */
#define __io_reads_ins(port, ctype, len, bfence, afence)			\
	static inline void __ ## port ## len(const volatile void __iomem *addr,	\
					     void *buffer,			\
					     unsigned int count)		\
	{									\
		bfence;								\
		if (count) {							\
			ctype *buf = buffer;					\
										\
			do {							\
				ctype x = __raw_read ## len(addr);		\
				*buf++ = x;					\
			} while (--count);					\
		}								\
		afence;								\
	}
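
/*
 * Illustrative expansion: __io_reads_ins(reads, u32, l, __io_br(),
 * __io_ar(addr)) below generates, in effect,
 *
 *	static inline void __readsl(const volatile void __iomem *addr,
 *				    void *buffer, unsigned int count)
 *	{
 *		__io_br();
 *		if (count) {
 *			u32 *buf = buffer;
 *
 *			do {
 *				u32 x = __raw_readl(addr);
 *				*buf++ = x;
 *			} while (--count);
 *		}
 *		__io_ar(addr);
 *	}
 *
 * i.e. a repeated read of the same MMIO address into a buffer.
 */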

#define __io_writes_outs(port, ctype, len, bfence, afence)			\
	static inline void __ ## port ## len(volatile void __iomem *addr,	\
					     const void *buffer,		\
					     unsigned int count)		\
	{									\
		bfence;								\
		if (count) {							\
			const ctype *buf = buffer;				\
										\
			do {							\
				__raw_write ## len(*buf++, addr);		\
			} while (--count);					\
		}								\
		afence;								\
	}

__io_reads_ins(reads,  u8, b, __io_br(), __io_ar(addr))
__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
#define readsl(addr, buffer, count) __readsl(addr, buffer, count)

__io_reads_ins(ins,  u8, b, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)

__io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
#define writesl(addr, buffer, count) __writesl(addr, buffer, count)
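
/*
 * Illustrative usage with a hypothetical MMIO device: the string forms
 * repeatedly access a single register, which is the usual way to drain or
 * fill a data FIFO, e.g.
 *
 *	readsl(regs + RX_FIFO, rx_buf, rx_words);	// drain RX FIFO
 *	writesl(regs + TX_FIFO, tx_buf, tx_words);	// fill TX FIFO
 *
 * where regs, RX_FIFO, TX_FIFO and both buffers belong to the driver.
 */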

__io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
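
/*
 * Illustrative usage with a hypothetical legacy-style device: the port
 * variants take a port number rather than a virtual address, so e.g.
 *
 *	insw(dev->data_port, buf, len / 2);	// read len bytes, 16 bits at a time
 *	outsw(dev->data_port, buf, len / 2);	// write them back out
 *
 * turns into repeated MMIO accesses at PCI_IOBASE + dev->data_port.
 */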

#ifdef CONFIG_64BIT
__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)

__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)

__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
#define writesq(addr, buffer, count) __writesq(addr, buffer, count)

__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw())
#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
#endif

#include <asm-generic/io.h>

#ifdef CONFIG_MMU
#define arch_memremap_wb(addr, size)	\
	((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
#endif
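
/*
 * Illustrative note: arch_memremap_wb() is the hook memremap() falls back to
 * for MEMREMAP_WB requests that are not already covered by the linear map, so
 * a (hypothetical) caller such as
 *
 *	void *shm = memremap(res->start, resource_size(res), MEMREMAP_WB);
 *
 * can end up here and get an ordinary cacheable kernel mapping via
 * ioremap_prot(..., _PAGE_KERNEL) rather than an __iomem one.
 */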

#endif /* _ASM_RISCV_IO_H */