/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *	read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *	while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <linux/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
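
/*
 * Illustrative sketch (not part of the original header) of how the three
 * access flavours above differ for a hypothetical device register; "regs"
 * and REG_STATUS are made-up names used only for the example.
 *
 *	void __iomem *regs;				// from ioremap()
 *	u32 a = readl(regs + REG_STATUS);		// ioswabl() mangling, then rmb()
 *	u32 b = readl_relaxed(regs + REG_STATUS);	// ioswabl() mangling, no barrier
 *	u32 c = __raw_readl(regs + REG_STATUS);		// raw load: no mangling, no barrier
 */
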
#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);

__BUILD_MEMORY_STRING(__raw_, q, u64)

#ifdef CONFIG_HAS_IOPORT_MAP

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are mapped.
 */
extern unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)

#else /* !CONFIG_HAS_IOPORT_MAP */

#include <asm/io_noioport.h>

#endif
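
/*
 * Illustrative sketch (not part of the original header): because ports
 * are memory-mapped here, the accessors generated above reduce to plain
 * loads/stores through __ioport_map(). The legacy IDE port numbers below
 * are only an example.
 *
 *	u16 buf[256];
 *	outb(0xec, 0x1f7);	// one byte to the command port
 *	insw(0x1f0, buf, 256);	// one 512-byte sector from the data port
 */
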
#define IO_SPACE_LIMIT 0xffffffff

/* We really want to try and get these down to memcpy() etc. */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
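
/*
 * Illustrative sketch (not part of the original header): on a 29-bit
 * part, remapping a physical address below P3SEG is just segment
 * arithmetic, e.g. for a made-up address:
 *
 *	phys_addr_t phys = 0x04000000;
 *	void __iomem *c = (void __iomem *)P1SEGADDR(phys);	// cached alias
 *	void __iomem *u = (void __iomem *)P2SEGADDR(phys);	// uncached alias
 *
 * which is exactly the fast path __ioremap_29bit() takes below.
 */
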
static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
static inline void iounmap(void __iomem *addr) {}
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}
#define ioremap_cache ioremap_cache

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_uc	ioremap

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */
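
/*
 * Illustrative usage sketch (not part of the original header), tying the
 * pieces together; the base address and the REG_CTRL offset are made up.
 *
 *	void __iomem *base = ioremap(0xfe000000, PAGE_SIZE);
 *	if (base) {
 *		writel(0x1, base + REG_CTRL);	// wmb(), then mangled store
 *		iounmap(base);
 *	}
 */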