#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) avoid writing the same thing over and
 * over again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define ARCH_HAS_IOREMAP_WC

#include <linux/string.h>
#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

#define mmiowb() barrier()

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")

#else

static inline __u64 readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);
	high = readl(p + 1);

	return low + ((u64)high << 32);
}

static inline void writeq(__u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr+4);
}

#endif
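
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * typical use of the accessors generated above on an ioremap()ed region.
 * The "regs" pointer and register offsets are hypothetical. Note that on
 * 32-bit kernels the readq()/writeq() fallbacks above issue two 32-bit
 * accesses each, so they are not atomic with respect to the device.
 */
#if 0
static u32 example_mmio(void __iomem *regs)
{
	writel(0x1, regs + 0x04);	/* hypothetical control register */
	return readl(regs + 0x00);	/* hypothetical status register */
}
#endif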
#define readq_relaxed(a)	readq(a)

#define __raw_readq(a)		readq(a)
#define __raw_writeq(val, addr)	writeq(val, addr)

/* Let people know that we have them */
#define readq			readq
#define writeq			writeq

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
	return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
#define isa_bus_to_virt		phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
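
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * round-tripping a kmalloc()ed buffer through virt_to_phys() and
 * phys_to_virt(). The buffer and its size are hypothetical; as the
 * kernel-doc above says, real drivers should use the DMA mapping API
 * rather than these helpers.
 */
#if 0
static void example_virt_phys(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);	/* hypothetical buffer */
	phys_addr_t phys;

	if (!buf)
		return;
	phys = virt_to_phys(buf);		/* CPU physical address */
	WARN_ON(phys_to_virt(phys) != buf);	/* inverse for lowmem mappings */
	kfree(buf);
}
#endif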

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);


#ifdef __KERNEL__

#include <asm-generic/iomap.h>

#include <linux/vmalloc.h>

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

static inline void
memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
{
	memset((void __force *)addr, val, count);
}

static inline void
memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
{
	memcpy(dst, (const void __force *)src, count);
}

static inline void
memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
{
	memcpy((void __force *)dst, src, count);
}

/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
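
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * mapping a device window with ioremap() and copying a block out of it
 * with memcpy_fromio(). The physical base address and the buffer size
 * are hypothetical.
 */
#if 0
static int example_ioremap(void)
{
	void __iomem *base = ioremap(0xfed00000, PAGE_SIZE); /* hypothetical */
	u8 buf[16];

	if (!base)
		return -ENOMEM;
	memcpy_fromio(buf, base, sizeof(buf));	/* bulk read from MMIO */
	iounmap(base);
	return 0;
}
#endif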

/*
 * Cache management
 *
 * This is needed for two cases:
 * 1. Out of order aware processors
 * 2. Accidentally out of order processors (PPro errata #51)
 */

static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}

#endif /* __KERNEL__ */

extern void native_io_delay(void);

extern int io_delay_type;
extern void io_delay_init(void);

#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else

static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}

#endif

#define BUILDIO(bwl, bw, type)						\
static inline void out##bwl(unsigned type value, int port)		\
{									\
	asm volatile("out" #bwl " %" #bw "0, %w1"			\
		     : : "a"(value), "Nd"(port));			\
}									\
									\
static inline unsigned type in##bwl(int port)				\
{									\
	unsigned type value;						\
	asm volatile("in" #bwl " %w1, %" #bw "0"			\
		     : "=a"(value) : "Nd"(port));			\
	return value;							\
}									\
									\
static inline void out##bwl##_p(unsigned type value, int port)		\
{									\
	out##bwl(value, port);						\
	slow_down_io();							\
}									\
									\
static inline unsigned type in##bwl##_p(int port)			\
{									\
	unsigned type value = in##bwl(port);				\
	slow_down_io();							\
	return value;							\
}									\
									\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{									\
	asm volatile("rep; outs" #bwl					\
		     : "+S"(addr), "+c"(count) : "d"(port));		\
}									\
									\
static inline void ins##bwl(int port, void *addr, unsigned long count)	\
{									\
	asm volatile("rep; ins" #bwl					\
		     : "+D"(addr), "+c"(count) : "d"(port));		\
}

BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)

extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				unsigned long prot_val);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time
 * mappings, before the real ioremap() is functional.
 * A boot-time mapping is currently limited to at most 16 pages.
 */
extern void early_ioremap_init(void);
extern void early_ioremap_reset(void);
extern void __iomem *early_ioremap(resource_size_t phys_addr,
				   unsigned long size);
extern void __iomem *early_memremap(resource_size_t phys_addr,
				    unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);

#define IO_SPACE_LIMIT 0xffff

#endif /* _ASM_X86_IO_H */
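
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * port I/O through the BUILDIO()-generated accessors above. Port 0x3f8,
 * the conventional base of the first 16550 UART, is used purely for
 * illustration; the register roles noted below are hypothetical for any
 * other device.
 */
#if 0
static void example_port_io(void)
{
	unsigned char lsr;

	outb(0x00, 0x3f8 + 1);		/* hypothetical: mask UART interrupts */
	lsr = inb(0x3f8 + 5);		/* read the line status register */
	(void)lsr;
}
#endif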