/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/types.h>
#include <asm/pgtable.h>

#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline unsigned long isa_bus_to_virt(unsigned long addr) {
	BUG();
	return 0;
}

static inline unsigned long isa_virt_to_bus(void *addr) {
	BUG();
	return 0;
}

/*
 * Memory mapped I/O
 *
 * readX()/writeX() do byteswapping and take an ioremapped address.
 * __raw_readX()/__raw_writeX() don't byteswap and take an ioremapped address.
 * gsc_*() don't byteswap and operate on physical addresses;
 * e.g. dev->hpa or 0xfee00000.
 */

static inline unsigned char gsc_readb(unsigned long addr)
{
	long flags;
	unsigned char ret;

	/* rsm/mtsm temporarily clear PSW_SM_D (data address translation),
	 * so the access goes straight to the physical address. */
	__asm__ __volatile__(
	"	rsm	%3,%0\n"
	"	ldbx	0(%2),%1\n"
	"	mtsm	%0\n"
	: "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) );

	return ret;
}

static inline unsigned short gsc_readw(unsigned long addr)
{
	long flags;
	unsigned short ret;

	__asm__ __volatile__(
	"	rsm	%3,%0\n"
	"	ldhx	0(%2),%1\n"
	"	mtsm	%0\n"
	: "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) );

	return ret;
}

static inline unsigned int gsc_readl(unsigned long addr)
{
	u32 ret;

	__asm__ __volatile__(
	"	ldwax	0(%1),%0\n"
	: "=r" (ret) : "r" (addr) );

	return ret;
}

static inline unsigned long long gsc_readq(unsigned long addr)
{
	unsigned long long ret;

#ifdef CONFIG_64BIT
	__asm__ __volatile__(
	"	ldda	0(%1),%0\n"
	: "=r" (ret) : "r" (addr) );
#else
	/* two reads may have side effects.. */
	ret = ((u64) gsc_readl(addr)) << 32;
	ret |= gsc_readl(addr+4);
#endif
	return ret;
}

static inline void gsc_writeb(unsigned char val, unsigned long addr)
{
	long flags;
	__asm__ __volatile__(
	"	rsm	%3,%0\n"
	"	stbs	%1,0(%2)\n"
	"	mtsm	%0\n"
	: "=&r" (flags) : "r" (val), "r" (addr), "i" (PSW_SM_D) );
}

static inline void gsc_writew(unsigned short val, unsigned long addr)
{
	long flags;
	__asm__ __volatile__(
	"	rsm	%3,%0\n"
	"	sths	%1,0(%2)\n"
	"	mtsm	%0\n"
	: "=&r" (flags) : "r" (val), "r" (addr), "i" (PSW_SM_D) );
}

static inline void gsc_writel(unsigned int val, unsigned long addr)
{
	__asm__ __volatile__(
	"	stwas	%0,0(%1)\n"
	: : "r" (val), "r" (addr) );
}

static inline void gsc_writeq(unsigned long long val, unsigned long addr)
{
#ifdef CONFIG_64BIT
	__asm__ __volatile__(
	"	stda	%0,0(%1)\n"
	: : "r" (val), "r" (addr) );
#else
	/* two writes may have side effects.. */
	gsc_writel(val >> 32, addr);
	gsc_writel(val, addr+4);
#endif
}
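/*
 * Illustrative sketch (not part of this header's API): how a driver might
 * use the gsc_*() accessors above.  They take a *physical* address such as
 * dev->hpa; no ioremap() is involved and no byteswapping is performed.
 * The function name and register offsets below are hypothetical.
 */
static inline unsigned int gsc_example_read_id(unsigned long hpa)
{
	gsc_writel(1, hpa + 0x08);	/* hypothetical "enable" register */
	return gsc_readl(hpa + 0x0c);	/* hypothetical "ID" register */
}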

/*
 * The standard PCI ioremap interfaces
 */

extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);

/* Most machines react poorly to I/O-space being cacheable... Instead let's
 * define ioremap() in terms of ioremap_nocache().
 */
static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
{
	return __ioremap(offset, size, _PAGE_NO_CACHE);
}
#define ioremap_nocache(off, sz)	ioremap((off), (sz))
#define ioremap_wc			ioremap_nocache
#define ioremap_uc			ioremap_nocache

extern void iounmap(const volatile void __iomem *addr);

static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
	return (*(volatile unsigned char __force *) (addr));
}
static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *) addr;
}
static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}
static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
{
	return *(volatile unsigned long long __force *) addr;
}

static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = b;
}
static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = b;
}
static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = b;
}
static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
{
	*(volatile unsigned long long __force *) addr = b;
}

static inline unsigned char readb(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
static inline unsigned short readw(const volatile void __iomem *addr)
{
	return le16_to_cpu(__raw_readw(addr));
}
static inline unsigned int readl(const volatile void __iomem *addr)
{
	return le32_to_cpu(__raw_readl(addr));
}
static inline unsigned long long readq(const volatile void __iomem *addr)
{
	return le64_to_cpu(__raw_readq(addr));
}

static inline void writeb(unsigned char b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
}
static inline void writew(unsigned short w, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(w), addr);
}
static inline void writel(unsigned int l, volatile void __iomem *addr)
{
	__raw_writel(cpu_to_le32(l), addr);
}
static inline void writeq(unsigned long long q, volatile void __iomem *addr)
{
	__raw_writeq(cpu_to_le64(q), addr);
}

#define readb	readb
#define readw	readw
#define readl	readl
#define readq	readq
#define writeb	writeb
#define writew	writew
#define writel	writel
#define writeq	writeq

#define readb_relaxed(addr)	readb(addr)
#define readw_relaxed(addr)	readw(addr)
#define readl_relaxed(addr)	readl(addr)
#define readq_relaxed(addr)	readq(addr)
#define writeb_relaxed(b, addr)	writeb(b, addr)
#define writew_relaxed(w, addr)	writew(w, addr)
#define writel_relaxed(l, addr)	writel(l, addr)
#define writeq_relaxed(q, addr)	writeq(q, addr)

#define mmiowb() do { } while (0)

void memset_io(volatile void __iomem *addr, unsigned char val, int count);
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
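/*
 * Illustrative sketch (not part of this header's API): typical MMIO usage
 * built on the interfaces above.  A little-endian device register window is
 * mapped with ioremap(), accessed with readl()/writel() (which byteswap on
 * this big-endian machine), and torn down with iounmap().  The function
 * name, mapping size and register offsets below are hypothetical.
 */
static inline unsigned int mmio_example_read_id(unsigned long phys_base)
{
	void __iomem *regs = ioremap(phys_base, 0x1000);
	unsigned int id = 0;

	if (regs) {
		id = readl(regs + 0x00);	/* hypothetical ID register */
		writel(1, regs + 0x04);		/* hypothetical control register */
		iounmap(regs);
	}
	return id;
}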

/* Port-space IO */

#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl

extern unsigned char eisa_in8(unsigned short port);
extern unsigned short eisa_in16(unsigned short port);
extern unsigned int eisa_in32(unsigned short port);
extern void eisa_out8(unsigned char data, unsigned short port);
extern void eisa_out16(unsigned short data, unsigned short port);
extern void eisa_out32(unsigned int data, unsigned short port);

#if defined(CONFIG_PCI)
extern unsigned char inb(int addr);
extern unsigned short inw(int addr);
extern unsigned int inl(int addr);

extern void outb(unsigned char b, int addr);
extern void outw(unsigned short b, int addr);
extern void outl(unsigned int b, int addr);
#elif defined(CONFIG_EISA)
#define inb eisa_in8
#define inw eisa_in16
#define inl eisa_in32
#define outb eisa_out8
#define outw eisa_out16
#define outl eisa_out32
#else
static inline char inb(unsigned long addr)
{
	BUG();
	return -1;
}

static inline short inw(unsigned long addr)
{
	BUG();
	return -1;
}

static inline int inl(unsigned long addr)
{
	BUG();
	return -1;
}

#define outb(x, y)	BUG()
#define outw(x, y)	BUG()
#define outl(x, y)	BUG()
#endif

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);


/* IO Port space is: BBiiii, where BB is the HBA number. */
#define IO_SPACE_LIMIT 0x00ffffff

/* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32-bit
 * mode and from 0xfffffffff0000000-0xffffffffffffffff in 64-bit mode
 * (essentially just sign extending).  This macro takes in a 32-bit I/O
 * address (still with the leading f) and outputs the correct value for
 * either 32- or 64-bit mode. */
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))

#include <asm-generic/iomap.h>

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */
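/*
 * Illustrative sketches (not part of this header's API), kept outside the
 * main include guard and guarded separately.  The function names, guard
 * name and port numbers below are hypothetical; F_EXTEND() is shown with a
 * concrete value.
 */
#ifndef _ASM_IO_H_EXAMPLES
#define _ASM_IO_H_EXAMPLES

/* Port-space I/O: read back a byte-wide register through an indexed pair. */
static inline unsigned char port_io_example(void)
{
	outb(0x12, 0x70);	/* hypothetical index port */
	return inb(0x71);	/* hypothetical data port */
}

/* F_EXTEND(): 0xf0000000 becomes 0xfffffffff0000000 with a 64-bit kernel,
 * and stays 0xf0000000 when unsigned long is 32 bits wide. */
static inline unsigned long f_extend_example(void)
{
	return F_EXTEND(0xf0000000);
}
#endif /* _ASM_IO_H_EXAMPLES */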