/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif


/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
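
/*
 * Illustrative sketch, not part of this header's API: since the __raw_*
 * accessors imply no barriers and no byte swapping, callers must provide
 * both themselves. A hypothetical driver copying words out of device
 * SRAM (the names "sram", "buf" and "n" are invented here) might do:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < n; i++)
 *		buf[i] = le32_to_cpu((__force __le32)__raw_readl(sram + 4 * i));
 *	rmb();
 *
 * where the explicit rmb() orders the reads before the copied data is
 * consumed.
 */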

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	__io_br();
	val = __le16_to_cpu(__raw_readw(addr));
	__io_ar(val);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	__io_br();
	val = __le32_to_cpu(__raw_readl(addr));
	__io_ar(val);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	__io_br();
	val = __le64_to_cpu(__raw_readq(addr));
	__io_ar(val);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writew(cpu_to_le16(value), addr);
	__io_aw();
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writel(__cpu_to_le32(value), addr);
	__io_aw();
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeq(__cpu_to_le64(value), addr);
	__io_aw();
}
#endif
#endif /* CONFIG_64BIT */
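
/*
 * Illustrative sketch, not part of this header's API: readl()/writel()
 * are the usual way to access little-endian MMIO registers from a
 * driver. Against a hypothetical device (the "base" pointer and the
 * register offsets below are invented for the example):
 *
 *	u32 status;
 *
 *	writel(BIT(0), base + 0x04);
 *	status = readl(base + 0x08);
 *
 * The implied barriers order these accesses against coherent DMA, so no
 * explicit rmb()/wmb() is needed here.
 */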

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	__raw_writeb(value, addr);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_le32(value), addr);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	__raw_writeq(__cpu_to_le64(value), addr);
}
#endif
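
/*
 * Illustrative sketch, not part of this header's API: the _relaxed()
 * variants suit hot paths where the caller orders things itself, e.g. a
 * poll loop on an invented STAT_DONE bit at an invented REG_STAT offset:
 *
 *	while (!(readl_relaxed(base + REG_STAT) & STAT_DONE))
 *		cpu_relax();
 *	rmb();
 *
 * The explicit rmb() replaces the ordering that a plain readl() would
 * have provided before the DMA'd data is touched.
 */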

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
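
/*
 * Illustrative sketch, not part of this header's API: the string
 * accessors fit devices that expose a FIFO behind one fixed register.
 * Draining a 512-byte sector as 16-bit words from an invented REG_FIFO
 * offset might look like:
 *
 *	u16 sector[256];
 *
 *	readsw(base + REG_FIFO, sector, ARRAY_SIZE(sector));
 *
 * The data moves in native endianness; any byte swapping is up to the
 * caller.
 */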

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

#include <linux/logic_pio.h>

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */

#ifndef inb
#define inb inb
static inline u8 inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#ifndef inw
#define inw inw
static inline u16 inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#ifndef inl
#define inl inl
static inline u32 inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#ifndef outb
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#ifndef outw
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#ifndef outl
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif
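
/*
 * Illustrative sketch, not part of this header's API: a driver for a
 * legacy 8250-style UART might transmit one byte with port I/O. The
 * 0x3f8 port base and the LSR/THR usage follow PC convention but are
 * only for illustration:
 *
 *	while (!(inb(0x3f8 + 5) & 0x20))
 *		cpu_relax();
 *	outb(c, 0x3f8);
 */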

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif
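
/*
 * Illustrative sketch, not part of this header's API: insw()/outsw()
 * are the port-I/O counterparts of readsw()/writesw(). An old IDE-style
 * data port transferring one 512-byte sector ("data_port" is invented):
 *
 *	u16 sector[256];
 *
 *	insw(data_port, sector, ARRAY_SIZE(sector));
 */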

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */
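
/*
 * Illustrative sketch, not part of this header's API: the *be variants
 * read and write registers that the device keeps in big-endian layout,
 * taking and returning values in CPU endianness. With invented REG_ID
 * and REG_CTRL offsets on a hypothetical big-endian peripheral:
 *
 *	u32 id = ioread32be(base + REG_ID);
 *	iowrite32be(id | BIT(31), base + REG_CTRL);
 */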

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
 * a default no-op implementation that expects that the physical addresses
 * used for MMIO are already marked as uncached, and can be used as kernel
 * virtual addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
 * are not implemented we fall back to plain ioremap().
 */
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <asm/pgtable.h>

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);

static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */

#ifndef ioremap_nocache
#define ioremap_nocache ioremap
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif

/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation. In general you do not want to use this function in a
 * driver and use plain ioremap, which is uncached by default. Similarly
 * architectures should not implement it unless they have a very good
 * reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
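
/*
 * Illustrative sketch, not part of this header's API: a typical probe
 * path maps a register window once and then uses the accessors above.
 * Error handling is abbreviated, and "res" and REG_ENABLE are invented:
 *
 *	void __iomem *base;
 *
 *	base = ioremap(res->start, resource_size(res));
 *	if (!base)
 *		return -ENOMEM;
 *	writel(1, base + REG_ENABLE);
 *	...
 *	iounmap(base);
 */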

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */