/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
#include <linux/instruction_pointer.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif

/*
 * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
 * specific kernel drivers in case of excessive/unwanted logging.
 *
 * Usage: Add a #define flag at the beginning of the driver file.
 * Ex: #define __DISABLE_TRACE_MMIO__
 *     #include <...>
 *     ...
 */
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
DECLARE_TRACEPOINT(rwmmio_post_read);

void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
		    unsigned long caller_addr);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
			 unsigned long caller_addr);
void log_read_mmio(u8 width, const volatile void __iomem *addr,
		   unsigned long caller_addr);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
			unsigned long caller_addr);

#else

static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				  unsigned long caller_addr) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				       unsigned long caller_addr) {}
static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
				 unsigned long caller_addr) {}
static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
				      unsigned long caller_addr) {}

#endif /* CONFIG_TRACE_MMIO_ACCESS */

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */
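/*
 * Illustrative sketch (not part of this header's API): the __raw_*()
 * accessors below do no byte swapping, no ordering and no tracing, so a
 * caller must provide its own barriers, e.g. when filling a native-endian
 * device SRAM before ringing a hypothetical doorbell register:
 *
 *	static void fill_sram(void __iomem *sram, const u32 *buf, int n)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			__raw_writel(buf[i], sram + 4 * i);
 *		wmb();	(order the fill against the later doorbell write)
 *	}
 */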
#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */
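/*
 * Illustrative sketch (hypothetical device, register offsets made up):
 * a driver normally uses these non-__raw accessors, which byte-swap on
 * big-endian CPUs and order the access against coherent DMA:
 *
 *	#define FOO_CTRL	0x00
 *	#define FOO_STATUS	0x04
 *
 *	static void foo_start(void __iomem *regs)
 *	{
 *		u32 ctrl = readl(regs + FOO_CTRL);
 *
 *		writel(ctrl | 1, regs + FOO_CTRL);
 *	}
 */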
#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_);
	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	log_post_read_mmio(val, 8, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_);
	__io_br();
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	__io_ar(val);
	log_post_read_mmio(val, 16, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_);
	__io_br();
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);
	log_post_read_mmio(val, 32, addr, _THIS_IP_);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_);
	__io_br();
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	__io_ar(val);
	log_post_read_mmio(val, 64, addr, _THIS_IP_);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_);
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
	log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_);
	__io_bw();
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	__io_aw();
	log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_);
	__io_bw();
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();
	log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_);
	__io_bw();
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	__io_aw();
	log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
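/*
 * Illustrative sketch (hypothetical register layout): the _relaxed()
 * variants are appropriate when no DMA-buffer or lock ordering is
 * implied, e.g. busy-polling a status bit, with one fully ordered read
 * at the end if the result gates access to DMA'd data:
 *
 *	while (!(readl_relaxed(regs + FOO_STATUS) & FOO_DONE))
 *		cpu_relax();
 *	val = readl(regs + FOO_RESULT);	(full ordering semantics here)
 */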
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_);
	val = __raw_readb(addr);
	log_post_read_mmio(val, 8, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_);
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	log_post_read_mmio(val, 16, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_);
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	log_post_read_mmio(val, 32, addr, _THIS_IP_);
	return val;
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_);
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	log_post_read_mmio(val, 64, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_);
	__raw_writeb(value, addr);
	log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_);
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_);
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_);
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
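/*
 * Illustrative sketch: draining a 16-word RX FIFO that presents its
 * data through a single hypothetical FOO_DATA register; note that the
 * string accessors do not byte-swap, which matches hardware FIFOs that
 * carry raw byte streams:
 *
 *	u32 rxbuf[16];
 *
 *	readsl(regs + FOO_DATA, rxbuf, ARRAY_SIZE(rxbuf));
 */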
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */
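/*
 * Illustrative sketch: port I/O takes a port number rather than a
 * virtual address; e.g. poking a hypothetical legacy index/data
 * register pair, as found on many ISA-style devices:
 *
 *	outb(reg_index, FOO_INDEX_PORT);	(select the register)
 *	val = inb(FOO_DATA_PORT);		(read its value)
 *
 * where FOO_INDEX_PORT/FOO_DATA_PORT are made-up port numbers below
 * IO_SPACE_LIMIT.
 */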
#if !defined(inb) && !defined(_inb)
#define _inb _inb
static inline u8 _inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
static inline u16 _inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
static inline u32 _inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
static inline void _outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
static inline void _outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
static inline void _outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */
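/*
 * Illustrative sketch: reading a sector's worth of data from a
 * hypothetical ATA-style 16-bit data port:
 *
 *	u16 sector[256];
 *
 *	insw(FOO_DATA_PORT, sector, ARRAY_SIZE(sector));
 */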
#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif
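/*
 * Illustrative sketch: unlike read[bwl]()/write[bwl](), the ioread/
 * iowrite family also accepts cookies returned by ioport_map() or
 * pci_iomap(), which may refer to either MMIO or port I/O space:
 *
 *	void __iomem *base = pci_iomap(pdev, 0, 0);	(BAR 0, full length)
 *
 *	if (base)
 *		iowrite32(1, base + FOO_CTRL);
 */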
#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */
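/*
 * Illustrative sketch: the *be variants above are for devices whose
 * registers are big-endian regardless of CPU endianness, e.g. reading
 * a version field from a hypothetical big-endian register file:
 *
 *	u32 ver = ioread32be(regs + FOO_VERSION);
 */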
#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP.  For NOMMU architectures we provide
 * a default no-op implementation that expects that the physical addresses
 * used for MMIO are already marked as uncached, and can be used as kernel
 * virtual addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them.  If
 * they are not implemented we fall back to plain ioremap().  Conversely,
 * ioremap_np() can provide stricter non-posted write semantics if the
 * architecture implements them.
 */
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>

/*
 * Arch code can implement the following two hooks when using GENERIC_IOREMAP:
 * ioremap_allowed() returns a bool,
 *   - true means continue to remap
 *   - false means skip remap and return directly
 * iounmap_allowed() returns a bool,
 *   - true means continue to vunmap
 *   - false means skip vunmap and return directly
 */
#ifndef ioremap_allowed
#define ioremap_allowed ioremap_allowed
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	return true;
}
#endif

#ifndef iounmap_allowed
#define iounmap_allowed iounmap_allowed
static inline bool iounmap_allowed(void *addr)
{
	return true;
}
#endif

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot);
void iounmap(volatile void __iomem *addr);

static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */

#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif
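/*
 * Illustrative sketch (hypothetical driver, error handling trimmed):
 * a platform driver maps its register window in probe and unmaps it on
 * removal; most drivers would use devm_ioremap_resource() instead and
 * get the unmap for free:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void __iomem *regs;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		regs = ioremap(res->start, resource_size(res));
 *		if (!regs)
 *			return -ENOMEM;
 *		...
 *	}
 */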
/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation.  In general you do not want to use this function in a
 * driver; use plain ioremap(), which is uncached by default, instead.
 * Similarly, architectures should not implement it unless they have a
 * very good reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

/*
 * ioremap_np needs an explicit architecture implementation, as it
 * requests stronger semantics than regular ioremap(). Portable drivers
 * should instead use one of the higher-level abstractions, like
 * devm_ioremap_resource(), to choose the correct variant for any given
 * device and bus. Portable drivers with a good reason to want non-posted
 * write semantics should always provide an ioremap() fallback in case
 * ioremap_np() is not available.
 */
#ifndef ioremap_np
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef CONFIG_GENERIC_IOMAP
#ifndef pci_iounmap
#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
#endif
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif
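/*
 * Illustrative sketch: clearing and later snapshotting a hypothetical
 * 256-byte shared-mailbox region in device memory:
 *
 *	u8 snapshot[256];
 *
 *	memset_io(mbox, 0, sizeof(snapshot));
 *	... (let the device fill the mailbox) ...
 *	memcpy_fromio(snapshot, mbox, sizeof(snapshot));
 */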
#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif

extern int devmem_is_allowed(unsigned long pfn);

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */