#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/compiler.h>

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that lets gcc optimize it as well
 * as possible and (b) avoid writing the same thing over and over
 * again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/*
 * A bit simplified and optimized by Jan Hubicka
 * Support for BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define IO_SPACE_LIMIT 0xffff

#include <asm/types.h>

#ifdef __KERNEL__

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */

#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl

#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel

#define memset_io(a,b,c) memset((a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),(b),(c))
#define memcpy_toio(a,b,c) memcpy((a),(b),(c))

#define write_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
#define read_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))

/*
 * Note: the 64-bit helpers expand to __raw_readq/__raw_writeq, which this
 * header does not define.
 */
#define write_le64(a, v) write_arch(q, le64, a, v)
#define write_le32(a, v) write_arch(l, le32, a, v)
#define write_le16(a, v) write_arch(w, le16, a, v)

#define read_le64(a) read_arch(q, le64, a)
#define read_le32(a) read_arch(l, le32, a)
#define read_le16(a) read_arch(w, le16, a)

#define write_be32(a, v) write_arch(l, be32, a, v)
#define write_be16(a, v) write_arch(w, be16, a, v)

#define read_be32(a) read_arch(l, be32, a)
#define read_be16(a) read_arch(w, be16, a)

#define write_8(a, v) __raw_writeb(v, a)
#define read_8(a) __raw_readb(a)

#define clrbits(type, addr, clear) \
	write_##type((addr), read_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	write_##type((addr), read_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	write_##type((addr), (read_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
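/*
 * Usage sketch (illustrative only - the register address and bit layout
 * below are hypothetical and not defined by this header): a driver would
 * typically combine the raw accessors with the clear/set helpers.  Reset
 * the register, enable it, then rewrite the divider field while leaving
 * all other bits intact:
 *
 *	#define CTRL_REG	0xfed08004
 *	#define CTRL_ENABLE	(1 << 0)
 *	#define CTRL_DIV_MASK	(0xff << 8)
 *
 *	writel(0, CTRL_REG);
 *	setbits_le32(CTRL_REG, CTRL_ENABLE);
 *	clrsetbits_le32(CTRL_REG, CTRL_DIV_MASK, (4 << 8) | CTRL_ENABLE);
 */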
/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The helpers below therefore access ISA
 * memory directly through the regular readX/writeX accessors:
 */
#define isa_readb(a) readb((a))
#define isa_readw(a) readw((a))
#define isa_readl(a) readl((a))
#define isa_writeb(b,a) writeb(b,(a))
#define isa_writew(w,a) writew(w,(a))
#define isa_writel(l,a) writel(l,(a))
#define isa_memset_io(a,b,c) memset_io((a),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),(b),(c))
#define isa_memcpy_toio(a,b,c) memcpy_toio((a),(b),(c))


static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}

/**
 * isa_check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the ISA mmio address io_addr.
 * Returns 1 on a match.
 *
 * This function is deprecated. New drivers should use ioremap and
 * check_signature.
 */

static inline int isa_check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (isa_readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
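/*
 * Usage sketch for check_signature() (illustrative only - the address,
 * signature bytes and handler are just an example): scanning a legacy
 * BIOS ROM region for the 0x55 0xaa option-ROM marker might look like:
 *
 *	static const unsigned char rom_sig[] = { 0x55, 0xaa };
 *
 *	if (check_signature(0xc0000, rom_sig, sizeof(rom_sig)))
 *		handle_option_rom();
 *
 * A full match returns 1, any mismatch returns 0.
 */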
#endif /* __KERNEL__ */

#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
#else
#define __SLOW_DOWN_IO "\noutb %%al,$0xed"
#endif

#ifdef REALLY_SLOW_IO
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
#else
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
#endif


/*
 * Talk about misusing macros..
 */
#define __OUT1(s,x) \
static inline void out##s(unsigned x value, unsigned short port) {

#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"


#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}

#define __IN1(s) \
static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;

#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

#define __IN(s,s1,i...) \
__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }

#define __INS(s) \
static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; ins" #s \
	: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define __OUTS(s) \
static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; outs" #s \
	: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define RETURN_TYPE unsigned char
__IN(b,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned short
__IN(w,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned int
__IN(l,"")
#undef RETURN_TYPE

__OUT(b,"b",char)
__OUT(w,"w",short)
__OUT(l,,int)

__INS(b)
__INS(w)
__INS(l)

__OUTS(b)
__OUTS(w)
__OUTS(l)

static inline void sync(void)
{
}

/*
 * Given a physical address and a length, return a virtual address
 * that can be used to access the memory range with the caching
 * properties specified by "flags".
 */
#define MAP_NOCACHE (0)
#define MAP_WRCOMBINE (0)
#define MAP_WRBACK (0)
#define MAP_WRTHROUGH (0)

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	return (void *)(uintptr_t)paddr;
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{

}

static inline phys_addr_t virt_to_phys(void * vaddr)
{
	return (phys_addr_t)(uintptr_t)(vaddr);
}

/*
 * TODO: The kernel offers some more advanced versions of barriers; it
 * might be advantageous to use them instead of the simple one here.
 */
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#define __iormb() dmb()
#define __iowmb() dmb()

#endif /* _ASM_IO_H */
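/*
 * Usage sketch for the port I/O and mapping helpers above (illustrative
 * only - the port number and physical address are made up for this
 * example).  Write a byte-wide device register via port I/O, read one
 * back, then access a memory-mapped register range through map_physmem():
 *
 *	u8 lsr;
 *	void *regs;
 *
 *	outb(0x03, 0x3f8 + 3);
 *	lsr = inb(0x3f8 + 5);
 *
 *	regs = map_physmem(0xfed00000, 0x1000, MAP_NOCACHE);
 *	writel(1, regs);
 *	unmap_physmem(regs, MAP_NOCACHE);
 */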