/*
 * Copyright (C) 2017 Andes Technology Corporation
 * Rick Chen, Andes Technology Corporation <rick@andestech.com>
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 */
#ifndef __ASM_RISCV_IO_H
#define __ASM_RISCV_IO_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm/byteorder.h>

static inline void sync(void)
{
}

/*
 * Given a physical address and a length, return a virtual address
 * that can be used to access the memory range with the caching
 * properties specified by "flags".
 */
#define MAP_NOCACHE	(0)
#define MAP_WRCOMBINE	(0)
#define MAP_WRBACK	(0)
#define MAP_WRTHROUGH	(0)

#ifdef CONFIG_ARCH_MAP_SYSMEM
static inline void *map_sysmem(phys_addr_t paddr, unsigned long len)
{
	if (paddr < PHYS_SDRAM_0_SIZE + PHYS_SDRAM_1_SIZE)
		paddr = paddr | 0x40000000;
	return (void *)(uintptr_t)paddr;
}

static inline void *unmap_sysmem(const void *vaddr)
{
	phys_addr_t paddr = (phys_addr_t)vaddr;

	paddr = paddr & ~0x40000000;
	return (void *)(uintptr_t)paddr;
}

static inline phys_addr_t map_to_sysmem(const void *ptr)
{
	return (phys_addr_t)(uintptr_t)ptr;
}
#endif

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	return (void *)paddr;
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}

static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)(vaddr);
}

/*
 * Generic virtual read/write.  The byte, half-word, word and double-word
 * accessors (__arch_*[bwlq]) are all defined here as plain pointer
 * dereferences.
 */
#define __arch_getb(a)			(*(unsigned char *)(a))
#define __arch_getw(a)			(*(unsigned short *)(a))
#define __arch_getl(a)			(*(unsigned int *)(a))
#define __arch_getq(a)			(*(unsigned long *)(a))

#define __arch_putb(v, a)		(*(unsigned char *)(a) = (v))
#define __arch_putw(v, a)		(*(unsigned short *)(a) = (v))
#define __arch_putl(v, a)		(*(unsigned int *)(a) = (v))
#define __arch_putq(v, a)		(*(unsigned long *)(a) = (v))

#define __raw_writeb(v, a)		__arch_putb(v, a)
#define __raw_writew(v, a)		__arch_putw(v, a)
#define __raw_writel(v, a)		__arch_putl(v, a)
#define __raw_writeq(v, a)		__arch_putq(v, a)

#define __raw_readb(a)			__arch_getb(a)
#define __raw_readw(a)			__arch_getw(a)
#define __raw_readl(a)			__arch_getl(a)
#define __raw_readq(a)			__arch_getq(a)

/*
 * TODO: The kernel offers some more advanced versions of barriers; it
 * might be advantageous to use them instead of the simple compiler
 * barrier below.
 */
#define dmb()		__asm__ __volatile__ ("" : : : "memory")
#define __iormb()	dmb()
#define __iowmb()	dmb()

static inline void writeb(u8 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putb(val, addr);
}

static inline void writew(u16 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putw(val, addr);
}

static inline void writel(u32 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putl(val, addr);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putq(val, addr);
}

static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	val = __arch_getb(addr);
	__iormb();
	return val;
}

static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	val = __arch_getw(addr);
	__iormb();
	return val;
}

static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	val = __arch_getl(addr);
	__iormb();
	return val;
}

static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	val = __arch_getq(addr);
	__iormb();
	return val;
}
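/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * would normally go through the ordered accessors above when talking to
 * a memory-mapped peripheral, and fall back to the __raw_* variants only
 * when ordering is handled elsewhere.  The base address and register
 * offsets below are hypothetical.
 *
 *	void __iomem *regs = map_physmem(0xf0000000, 0x100, MAP_NOCACHE);
 *	u32 status;
 *
 *	writel(0x1, regs + 0x04);	ordered: write barrier before store
 *	status = readl(regs + 0x00);	ordered: read barrier after load
 *	__raw_writel(0x0, regs + 0x04);	no barrier, no ordering guarantee
 *	unmap_physmem(regs, MAP_NOCACHE);
 */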
/*
 * The compiler seems to be incapable of optimising constants
 * properly.  Spell it out to the compiler in some cases.
 * These are only valid for small values of "off" (< 1<<12)
 */
#define __raw_base_writeb(val, base, off)	__arch_base_putb(val, base, off)
#define __raw_base_writew(val, base, off)	__arch_base_putw(val, base, off)
#define __raw_base_writel(val, base, off)	__arch_base_putl(val, base, off)

#define __raw_base_readb(base, off)	__arch_base_getb(base, off)
#define __raw_base_readw(base, off)	__arch_base_getw(base, off)
#define __raw_base_readl(base, off)	__arch_base_getl(base, off)

#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))

#define out_le32(a, v)			out_arch(l, le32, a, v)
#define out_le16(a, v)			out_arch(w, le16, a, v)

#define in_le32(a)			in_arch(l, le32, a)
#define in_le16(a)			in_arch(w, le16, a)

#define out_be32(a, v)			out_arch(l, be32, a, v)
#define out_be16(a, v)			out_arch(w, be16, a, v)

#define in_be32(a)			in_arch(l, be32, a)
#define in_be16(a)			in_arch(w, be16, a)

#define out_8(a, v)			__raw_writeb(v, a)
#define in_8(a)				__raw_readb(a)

/*
 * Clear and set bits in one shot.  These macros can be used to clear and
 * set multiple bits in a register using a single call.  These macros can
 * also be used to set a multiple-bit bit pattern using a mask, by
 * specifying the mask in the 'clear' parameter and the new bit pattern
 * in the 'set' parameter.
 */

#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
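/*
 * Usage sketch (illustrative only): read-modify-write on a little-endian
 * 32-bit register with the helpers above.  The register pointer "ctrl"
 * and the field layout are hypothetical.
 *
 *	setbits_le32(ctrl, 0x1);		set bit 0
 *	clrbits_le32(ctrl, 0x2);		clear bit 1
 *	clrsetbits_le32(ctrl, 0xff00, 0x0500);	replace bits 15:8 with 5
 */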
/*
 * Now, pick up the machine-defined IO definitions
 * #include <asm/arch/io.h>
 */

/*
 * IO port access primitives
 * -------------------------
 *
 * RISC-V, like ARM, has no special IO access instructions; all IO is
 * memory mapped.
 * Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
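/*
 * Illustrative only: a board or SoC specific header could provide __io
 * before including this file to map an x86-style port number onto a
 * memory-mapped window; the base address below is hypothetical.
 *
 *	#define __io(p)	((void __iomem *)(0xe0000000UL + (p)))
 *
 * Once __io is defined, the outb()/inb() and outs*()/ins*() families
 * below become available.
 */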
#ifdef __io
#define outb(v, p)			__raw_writeb(v, __io(p))
#define outw(v, p)			__raw_writew(cpu_to_le16(v), __io(p))
#define outl(v, p)			__raw_writel(cpu_to_le32(v), __io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

#define outsb(p, d, l)			writesb(__io(p), d, l)
#define outsw(p, d, l)			writesw(__io(p), d, l)
#define outsl(p, d, l)			writesl(__io(p), d, l)

#define insb(p, d, l)			readsb(__io(p), d, l)
#define insw(p, d, l)			readsw(__io(p), d, l)
#define insl(p, d, l)			readsl(__io(p), d, l)

/*
 * The string accessors below transfer a buffer to or from a single,
 * fixed FIFO register: only the buffer pointer is advanced, the IO
 * address is not.
 */
static inline void readsb(unsigned int *addr, void *data, int bytelen)
{
	unsigned char *ptr;
	unsigned char *ptr2;

	ptr = (unsigned char *)addr;
	ptr2 = (unsigned char *)data;

	while (bytelen) {
		*ptr2 = *ptr;
		ptr2++;
		bytelen--;
	}
}

static inline void readsw(unsigned int *addr, void *data, int wordlen)
{
	unsigned short *ptr;
	unsigned short *ptr2;

	ptr = (unsigned short *)addr;
	ptr2 = (unsigned short *)data;

	while (wordlen) {
		*ptr2 = *ptr;
		ptr2++;
		wordlen--;
	}
}

static inline void readsl(unsigned int *addr, void *data, int longlen)
{
	unsigned int *ptr;
	unsigned int *ptr2;

	ptr = (unsigned int *)addr;
	ptr2 = (unsigned int *)data;

	while (longlen) {
		*ptr2 = *ptr;
		ptr2++;
		longlen--;
	}
}

static inline void writesb(unsigned int *addr, const void *data, int bytelen)
{
	unsigned char *ptr;
	unsigned char *ptr2;

	ptr = (unsigned char *)addr;
	ptr2 = (unsigned char *)data;

	while (bytelen) {
		*ptr = *ptr2;
		ptr2++;
		bytelen--;
	}
}

static inline void writesw(unsigned int *addr, const void *data, int wordlen)
{
	unsigned short *ptr;
	unsigned short *ptr2;

	ptr = (unsigned short *)addr;
	ptr2 = (unsigned short *)data;

	while (wordlen) {
		*ptr = *ptr2;
		ptr2++;
		wordlen--;
	}
}

static inline void writesl(unsigned int *addr, const void *data, int longlen)
{
	unsigned int *ptr;
	unsigned int *ptr2;

	ptr = (unsigned int *)addr;
	ptr2 = (unsigned int *)data;

	while (longlen) {
		*ptr = *ptr2;
		ptr2++;
		longlen--;
	}
}
#endif

#define outb_p(val, port)		outb((val), (port))
#define outw_p(val, port)		outw((val), (port))
#define outl_p(val, port)		outl((val), (port))
#define inb_p(port)			inb((port))
#define inw_p(port)			inw((port))
#define inl_p(port)			inl((port))

#define outsb_p(port, from, len)	outsb(port, from, len)
#define outsw_p(port, from, len)	outsw(port, from, len)
#define outsl_p(port, from, len)	outsl(port, from, len)
#define insb_p(port, to, len)		insb(port, to, len)
#define insw_p(port, to, len)		insw(port, to, len)
#define insl_p(port, to, len)		insl(port, to, len)

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 */

/*
 * String version of IO memory access ops:
 */
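/*
 * Usage sketch (illustrative only): the ins*()/outs*() helpers above move
 * a buffer to or from one fixed FIFO register rather than an incrementing
 * memory range.  The port number and buffer are hypothetical.
 *
 *	u16 buf[64];
 *
 *	insw(0x100, buf, 64);	drain 64 half-words from the FIFO at port 0x100
 *	outsw(0x100, buf, 64);	push the same 64 half-words back out
 */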
/*
 * If this architecture has PCI memory IO, then define the read/write
 * macros.  These should only be used with the cookie passed from
 * ioremap.
 */
#ifdef __mem_pci

#define readb(c)	({ unsigned int __v = \
				__raw_readb(__mem_pci(c)); __v; })
#define readw(c)	({ unsigned int __v = \
				le16_to_cpu(__raw_readw(__mem_pci(c))); __v; })
#define readl(c)	({ unsigned int __v = \
				le32_to_cpu(__raw_readl(__mem_pci(c))); __v; })

#define writeb(v, c)		__raw_writeb(v, __mem_pci(c))
#define writew(v, c)		__raw_writew(cpu_to_le16(v), __mem_pci(c))
#define writel(v, c)		__raw_writel(cpu_to_le32(v), __mem_pci(c))

#define memset_io(c, v, l)	_memset_io(__mem_pci(c), (v), (l))
#define memcpy_fromio(a, c, l)	_memcpy_fromio((a), __mem_pci(c), (l))
#define memcpy_toio(c, a, l)	_memcpy_toio(__mem_pci(c), (a), (l))

#define eth_io_copy_and_sum(s, c, l, b) \
	eth_copy_and_sum((s), __mem_pci(c), (l), (b))

static inline int check_signature(ulong io_addr, const uchar *s, int len)
{
	int retval = 0;

	do {
		if (readb(io_addr) != *s)
			goto out;
		io_addr++;
		s++;
		len--;
	} while (len);
	retval = 1;
out:
	return retval;
}
#endif	/* __mem_pci */

/*
 * If this architecture has ISA IO, then define the isa_read/isa_write
 * macros.
 */
#ifdef __mem_isa

#define isa_readb(addr)			__raw_readb(__mem_isa(addr))
#define isa_readw(addr)			__raw_readw(__mem_isa(addr))
#define isa_readl(addr)			__raw_readl(__mem_isa(addr))
#define isa_writeb(val, addr)		__raw_writeb(val, __mem_isa(addr))
#define isa_writew(val, addr)		__raw_writew(val, __mem_isa(addr))
#define isa_writel(val, addr)		__raw_writel(val, __mem_isa(addr))
#define isa_memset_io(a, b, c)		_memset_io(__mem_isa(a), (b), (c))
#define isa_memcpy_fromio(a, b, c)	_memcpy_fromio((a), __mem_isa(b), (c))
#define isa_memcpy_toio(a, b, c)	_memcpy_toio(__mem_isa((a)), (b), (c))

#define isa_eth_io_copy_and_sum(a, b, c, d) \
	eth_copy_and_sum((a), __mem_isa(b), (c), (d))

static inline int
isa_check_signature(ulong io_addr, const uchar *s, int len)
{
	int retval = 0;

	do {
		if (isa_readb(io_addr) != *s)
			goto out;
		io_addr++;
		s++;
		len--;
	} while (len);
	retval = 1;
out:
	return retval;
}

#else	/* __mem_isa */

#define isa_readb(addr)			(__readwrite_bug("isa_readb"), 0)
#define isa_readw(addr)			(__readwrite_bug("isa_readw"), 0)
#define isa_readl(addr)			(__readwrite_bug("isa_readl"), 0)
#define isa_writeb(val, addr)		__readwrite_bug("isa_writeb")
#define isa_writew(val, addr)		__readwrite_bug("isa_writew")
#define isa_writel(val, addr)		__readwrite_bug("isa_writel")
#define isa_memset_io(a, b, c)		__readwrite_bug("isa_memset_io")
#define isa_memcpy_fromio(a, b, c)	__readwrite_bug("isa_memcpy_fromio")
#define isa_memcpy_toio(a, b, c)	__readwrite_bug("isa_memcpy_toio")

#define isa_eth_io_copy_and_sum(a, b, c, d) \
	__readwrite_bug("isa_eth_io_copy_and_sum")

#define isa_check_signature(io, sig, len)	(0)

#endif	/* __mem_isa */
#endif	/* __KERNEL__ */
#endif	/* __ASM_RISCV_IO_H */