#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
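
/*
 * For illustration only: a hand-expanded sketch of what the
 * build_mmio_read(readl, "l", unsigned int, "=r", :"memory") instantiation
 * above produces; the macro instantiation itself is the real definition.
 *
 *	static inline unsigned int readl(const volatile void __iomem *addr)
 *	{
 *		unsigned int ret;
 *		asm volatile("movl %1,%0"
 *			     : "=r" (ret)
 *			     : "m" (*(volatile unsigned int __force *)addr)
 *			     : "memory");
 *		return ret;
 *	}
 *
 * The "memory" clobber on the plain readX()/writeX() variants keeps the
 * compiler from reordering or caching memory accesses around the MMIO
 * access; the __readX()/__writeX() variants pass an empty barrier
 * argument and therefore omit the clobber.
 */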

#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

#define mmiowb() barrier()

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")

#else

/*
 * On 32-bit kernels there is no single 64-bit MMIO instruction, so readq
 * and writeq are built from two 32-bit accesses (low word first) and are
 * not atomic with respect to the device.
 */
static inline __u64 readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);
	high = readl(p + 1);

	return low + ((u64)high << 32);
}

static inline void writeq(__u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}

#endif

#define readq_relaxed(a)	readq(a)

#define __raw_readq(a)		readq(a)
#define __raw_writeq(val, addr)	writeq(val, addr)

/* Let people know that we have them */
#define readq			readq
#define writeq			writeq

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
	return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
#define isa_bus_to_virt		phys_to_virt

/*
 * PCI bus addresses, however, are not necessarily 1:1 with physical
 * addresses, and therefore these interfaces are forbidden in portable
 * PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				  unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);


#ifdef CONFIG_X86_32
# include "io_32.h"
#else
# include "io_64.h"
#endif

extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time
 * mappings, before the real ioremap() is functional.
 * A boot-time mapping is currently limited to at most 16 pages.
 */
extern void early_ioremap_init(void);
extern void early_ioremap_reset(void);
extern void __iomem *early_ioremap(resource_size_t phys_addr,
				   unsigned long size);
extern void __iomem *early_memremap(resource_size_t phys_addr,
				    unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);

#define IO_SPACE_LIMIT 0xffff

#endif /* _ASM_X86_IO_H */
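
/*
 * Illustrative sketch only: one typical way a driver combines the
 * interfaces declared above. The names regs, pdev, CTRL_REG and
 * STATUS_REG are hypothetical, and for PCI BARs pci_iomap() is usually
 * the better choice, as the ioremap() comment notes.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	writel(1, regs + CTRL_REG);
 *	status = readl(regs + STATUS_REG);
 *
 *	iounmap(regs);
 */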