/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface.  */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);
	barrier();
}

extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vv.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(volatile void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(volatile void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41.  */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor.  */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif

#define virt_to_phys		virt_to_phys
#define phys_to_virt		phys_to_virt
#define page_to_phys(page)	page_to_pa(page)
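
/*
 * Worked example (illustrative only, non-48-bit KSEG case): for a kernel
 * pointer p == (void *)0xfffffc0000001000, i.e. IDENT_ADDR + 0x1000,
 * virt_to_phys() computes
 *
 *	phys = 0xfffffc0000001000
 *	phys <<= 23;				yields 0x0000000800000000
 *	phys = (long)phys >> 23;		yields 0x0000000000001000
 *	phys &= (1ul << hwrpb->pa_bits) - 1;	unchanged for typical pa_bits
 *
 * so virt_to_phys(p) == 0x1000, and phys_to_virt(0x1000) recreates
 * IDENT_ADDR + 0x1000.  The shift pair keeps the low KSEG offset bits and
 * drops the 0xfffffc... segment bits.
 */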

/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus isa_virt_to_bus

static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved).  */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt isa_bus_to_virt

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}

REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u64, ioread64, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u64, iowrite64, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX			generic
#define generic_trivial_rw_bw		0
#define generic_trivial_rw_lq		0
#define generic_trivial_io_bw		0
#define generic_trivial_io_lq		0
#define generic_trivial_iounmap		0

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */
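
/*
 * Illustrative expansion of IO_CONCAT() above: the extra _IO_CONCAT()
 * level exists so that __IO_PREFIX is macro-expanded before token
 * pasting.  With the generic definitions in this file,
 *
 *	IO_CONCAT(__IO_PREFIX,ioread8)(addr)
 *
 * becomes generic_ioread8(addr), and IO_CONCAT(__IO_PREFIX,trivial_io_bw)
 * becomes generic_trivial_io_bw, i.e. 0, which keeps the out-of-line
 * routines below.  A chipset header instead sets its own prefix (for
 * example, hypothetically, "#define __IO_PREFIX foo" together with
 * "#define foo_trivial_io_bw 1"), which routes the same call sites to
 * foo_ioread8() and selects the inline expansions.  Pasting __IO_PREFIX
 * directly with a ## _ ## b would instead produce the literal token
 * __IO_PREFIX_ioread8.
 */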

/*
 * We always have external versions of these routines.
 */
extern u8		inb(unsigned long port);
extern u16		inw(unsigned long port);
extern u32		inl(unsigned long port);
extern void		outb(u8 b, unsigned long port);
extern void		outw(u16 b, unsigned long port);
extern void		outl(u32 b, unsigned long port);
#define inb inb
#define inw inw
#define inl inl
#define outb outb
#define outw outw
#define outl outl

extern u8		readb(const volatile void __iomem *addr);
extern u16		readw(const volatile void __iomem *addr);
extern u32		readl(const volatile void __iomem *addr);
extern u64		readq(const volatile void __iomem *addr);
extern void		writeb(u8 b, volatile void __iomem *addr);
extern void		writew(u16 b, volatile void __iomem *addr);
extern void		writel(u32 b, volatile void __iomem *addr);
extern void		writeq(u64 b, volatile void __iomem *addr);
#define readb readb
#define readw readw
#define readl readl
#define readq readq
#define writeb writeb
#define writew writew
#define writel writel
#define writeq writeq

extern u8		__raw_readb(const volatile void __iomem *addr);
extern u16		__raw_readw(const volatile void __iomem *addr);
extern u32		__raw_readl(const volatile void __iomem *addr);
extern u64		__raw_readq(const volatile void __iomem *addr);
extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
extern void		__raw_writew(u16 b, volatile void __iomem *addr);
extern void		__raw_writel(u32 b, volatile void __iomem *addr);
extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
#define __raw_readb __raw_readb
#define __raw_readw __raw_readw
#define __raw_readl __raw_readl
#define __raw_readq __raw_readq
#define __raw_writeb __raw_writeb
#define __raw_writew __raw_writew
#define __raw_writel __raw_writel
#define __raw_writeq __raw_writeq

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

#define ioport_map ioport_map
#define ioport_unmap ioport_unmap

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

#define ioremap_wc ioremap
#define ioremap_uc ioremap

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}
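
/*
 * Usage sketch (hypothetical device, made-up offsets): the mapping
 * interface above is used as on any other Linux port.  For MMIO:
 *
 *	void __iomem *regs = ioremap(mem_start, 0x100);
 *
 *	if (regs) {
 *		u32 id = readl(regs + 0x00);
 *		writel(0x1, regs + 0x04);
 *		iounmap(regs);
 *	}
 *
 * and for legacy port I/O, e.g. peeking at the line status register of a
 * 16550-style UART at the conventional 0x3f8 base:
 *
 *	void __iomem *p = ioport_map(0x3f8, 8);
 *	u8 lsr = ioread8(p + 5);
 *
 * Here mem_start stands in for a bus address taken from the device's
 * resources; it is not defined by this header.
 */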

/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif

#define ioread8 ioread8
#define ioread16 ioread16
#define iowrite8 iowrite8
#define iowrite16 iowrite16

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline u64 ioread64(const void __iomem *addr)
{
	u64 ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline void iowrite64(u64 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr);
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#define ioread32 ioread32
#define ioread64 ioread64
#define iowrite32 iowrite32
#define iowrite64 iowrite64

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
#endif
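
/*
 * Note on the barrier placement above (sketch, hypothetical offsets): the
 * readb()/writeb() family brackets each access with mb(), while the
 * __raw_* forms add no ordering at all.  A driver can use this to batch
 * device accesses and pay for ordering once, e.g.
 *
 *	__raw_writew(lo, regs + 0x10);
 *	__raw_writew(hi, regs + 0x14);
 *	writew(1, regs + 0x18);
 *
 * where the leading mb() inside writew() orders the two raw stores ahead
 * of the final "kick" write.
 */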

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
#endif

#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))

#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl

extern u8 readb_relaxed(const volatile void __iomem *addr);
extern u16 readw_relaxed(const volatile void __iomem *addr);
extern u32 readl_relaxed(const volatile void __iomem *addr);
extern u64 readq_relaxed(const volatile void __iomem *addr);
#define readb_relaxed readb_relaxed
#define readw_relaxed readw_relaxed
#define readl_relaxed readl_relaxed
#define readq_relaxed readq_relaxed

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}
#endif

#define writeb_relaxed	writeb
#define writew_relaxed	writew
#define writel_relaxed	writel
#define writeq_relaxed	writeq

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}

#define memset_io memset_io
#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio
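
/*
 * Worked example for the fill constants above: multiplying by
 * 0x0101010101010101UL replicates a byte into all eight byte lanes, and
 * 0x0001000100010001UL replicates a 16-bit value into all four halfword
 * lanes, so _memset_c_io() always receives a full 64-bit fill pattern:
 *
 *	memset_io(p, 0xab, len)		fill pattern 0xabababababababab
 *	memsetw_io(p, 0xbeef, len)	fill pattern 0xbeefbeefbeefbeef
 */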

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

#define insb insb
#define insw insw
#define insl insl
#define outsb outsb
#define outsw outsw
#define outsl outsl

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70.  Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170 + (x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * These get provided from <asm-generic/iomap.h> since alpha does not
 * select GENERIC_IOMAP.
 */
#define ioread64 ioread64
#define iowrite64 iowrite64
#define ioread64be ioread64be
#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
#define iowrite8_rep iowrite8_rep
#define iowrite16_rep iowrite16_rep
#define iowrite32_rep iowrite32_rep
#define pci_iounmap pci_iounmap

#include <asm-generic/io.h>

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */