/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface.  */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset.
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 *
 * Interrupts are blocked by raising the IPL to maximum for the
 * duration of the update; 'flags' holds the previous IPL, which
 * is restored before returning.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);
	barrier();
}

/* Update the HAE only when it actually changes; the cached copy lets
   us skip the (expensive, interrupt-blocking) hardware update.  */
extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vv.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41.  */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor.  */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif

#define page_to_phys(page)	page_to_pa(page)

/* Maximum PIO space address supported?  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	/* Addresses outside the direct-map window yield 0.  */
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus virt_to_bus

static inline void * __deprecated bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved).  */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt bus_to_virt

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}

REMAP1(unsigned int, ioread8, /**/)
REMAP1(unsigned int, ioread16, /**/)
REMAP1(unsigned int, ioread32, /**/)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX		generic
#define generic_trivial_rw_bw	0
#define generic_trivial_rw_lq	0
#define generic_trivial_io_bw	0
#define generic_trivial_io_lq	0
#define generic_trivial_iounmap	0

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8		inb(unsigned long port);
extern u16		inw(unsigned long port);
extern u32		inl(unsigned long port);
extern void		outb(u8 b, unsigned long port);
extern void		outw(u16 b, unsigned long port);
extern void		outl(u32 b, unsigned long port);

extern u8		readb(const volatile void __iomem *addr);
extern u16		readw(const volatile void __iomem *addr);
extern u32		readl(const volatile void __iomem *addr);
extern u64		readq(const volatile void __iomem *addr);
extern void		writeb(u8 b, volatile void __iomem *addr);
extern void		writew(u16 b, volatile void __iomem *addr);
extern void		writel(u32 b, volatile void __iomem *addr);
extern void		writeq(u64 b, volatile void __iomem *addr);

extern u8		__raw_readb(const volatile void __iomem *addr);
extern u16		__raw_readw(const volatile void __iomem *addr);
extern u32		__raw_readl(const volatile void __iomem *addr);
extern u64		__raw_readq(const volatile void __iomem *addr);
extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
extern void		__raw_writew(u16 b, volatile void __iomem *addr);
extern void		__raw_writel(u32 b, volatile void __iomem *addr);
extern void		__raw_writeq(u64 b, volatile void __iomem *addr);

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

static inline void __iomem * ioremap_nocache(unsigned long offset,
					     unsigned long size)
{
	return ioremap(offset, size);
}

#define ioremap_wc ioremap_nocache
#define ioremap_uc ioremap_nocache

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
#endif

#define ioread16be(p) be16_to_cpu(ioread16(p))
#define ioread32be(p) be32_to_cpu(ioread32(p))
#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))

#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl

/* The "relaxed" accessors skip the memory barrier that the ordered
   readX/writeX variants issue.  */
#define readb_relaxed(addr)	__raw_readb(addr)
#define readw_relaxed(addr)	__raw_readw(addr)
#define readl_relaxed(addr)	__raw_readl(addr)
#define readq_relaxed(addr)	__raw_readq(addr)
#define writeb_relaxed(b, addr)	__raw_writeb(b, addr)
#define writew_relaxed(b, addr)	__raw_writew(b, addr)
#define writel_relaxed(b, addr)	__raw_writel(b, addr)
#define writeq_relaxed(b, addr)	__raw_writeq(b, addr)

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	/* Replicate the byte across all 8 lanes of the fill pattern.  */
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	/* Replicate the 16-bit value across all 4 lanes of the pattern.  */
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70.  Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	(p)

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */