/*
 * linux/include/asm-arm/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Modifications:
 *  16-Sep-1996	RMK	Inlined the inx/outx functions & optimised for both
 *			constant addresses and variable addresses.
 *  04-Dec-1997	RMK	Moved a lot of this stuff to the new architecture
 *			specific IO header files.
 *  27-Mar-1999	PJB	Second parameter of memcpy_toio is const..
 *  04-Apr-1999	PJB	Added check_signature.
 *  12-Dec-1999	RMK	More cleanups
 *  18-Jun-2000	RMK	Removed virt_to_* and friends definitions
 */
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#if 0	/* XXX###XXX */
#include <asm/arch/hardware.h>
#endif	/* XXX###XXX */

static inline void sync(void)
{
}

/*
 * Given a physical address and a length, return a virtual address
 * that can be used to access the memory range with the caching
 * properties specified by "flags".
 */
#define MAP_NOCACHE	(0)
#define MAP_WRCOMBINE	(0)
#define MAP_WRBACK	(0)
#define MAP_WRTHROUGH	(0)

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	return (void *)((unsigned long)paddr);
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}

static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)((unsigned long)vaddr);
}
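
/*
 * Usage example (illustrative sketch, not part of this header): a driver
 * that knows the physical base of its device can obtain a CPU pointer to
 * it and release the mapping when done.  The base address 0x48000000 and
 * the length below are hypothetical.
 *
 *	void *regs = map_physmem(0x48000000, 0x1000, MAP_NOCACHE);
 *
 *	... access the device through "regs" ...
 *
 *	unmap_physmem(regs, MAP_NOCACHE);
 */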

/*
 * Generic virtual read/write.  Byte, half-word, word and quad-word
 * accessors (__arch_*[bwlq]) are all defined here; architecture specific
 * code may provide its own versions.
 */
#define __arch_getb(a)			(*(volatile unsigned char *)(a))
#define __arch_getw(a)			(*(volatile unsigned short *)(a))
#define __arch_getl(a)			(*(volatile unsigned int *)(a))
#define __arch_getq(a)			(*(volatile unsigned long long *)(a))

#define __arch_putb(v,a)		(*(volatile unsigned char *)(a) = (v))
#define __arch_putw(v,a)		(*(volatile unsigned short *)(a) = (v))
#define __arch_putl(v,a)		(*(volatile unsigned int *)(a) = (v))
#define __arch_putq(v,a)		(*(volatile unsigned long long *)(a) = (v))

static inline void __raw_writesb(unsigned long addr, const void *data,
				 int bytelen)
{
	uint8_t *buf = (uint8_t *)data;

	while (bytelen--)
		__arch_putb(*buf++, addr);
}

static inline void __raw_writesw(unsigned long addr, const void *data,
				 int wordlen)
{
	uint16_t *buf = (uint16_t *)data;

	while (wordlen--)
		__arch_putw(*buf++, addr);
}

static inline void __raw_writesl(unsigned long addr, const void *data,
				 int longlen)
{
	uint32_t *buf = (uint32_t *)data;

	while (longlen--)
		__arch_putl(*buf++, addr);
}

static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
{
	uint8_t *buf = (uint8_t *)data;

	while (bytelen--)
		*buf++ = __arch_getb(addr);
}

static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
{
	uint16_t *buf = (uint16_t *)data;

	while (wordlen--)
		*buf++ = __arch_getw(addr);
}

static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
{
	uint32_t *buf = (uint32_t *)data;

	while (longlen--)
		*buf++ = __arch_getl(addr);
}

#define __raw_writeb(v,a)	__arch_putb(v,a)
#define __raw_writew(v,a)	__arch_putw(v,a)
#define __raw_writel(v,a)	__arch_putl(v,a)
#define __raw_writeq(v,a)	__arch_putq(v,a)

#define __raw_readb(a)		__arch_getb(a)
#define __raw_readw(a)		__arch_getw(a)
#define __raw_readl(a)		__arch_getl(a)
#define __raw_readq(a)		__arch_getq(a)

/*
 * TODO: The kernel offers more advanced barrier variants; using them here
 * instead of these simple ones might be an improvement.
 */
#define mb()		asm volatile("dsb sy" : : : "memory")
#define dmb()		__asm__ __volatile__ ("" : : : "memory")
#define __iormb()	dmb()
#define __iowmb()	dmb()

#define writeb(v,c)	({ u8  __v = v; __iowmb(); __arch_putb(__v,c); __v; })
#define writew(v,c)	({ u16 __v = v; __iowmb(); __arch_putw(__v,c); __v; })
#define writel(v,c)	({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; })
#define writeq(v,c)	({ u64 __v = v; __iowmb(); __arch_putq(__v,c); __v; })

#define readb(c)	({ u8  __v = __arch_getb(c); __iormb(); __v; })
#define readw(c)	({ u16 __v = __arch_getw(c); __iormb(); __v; })
#define readl(c)	({ u32 __v = __arch_getl(c); __iormb(); __v; })
#define readq(c)	({ u64 __v = __arch_getq(c); __iormb(); __v; })
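
/*
 * Usage example (illustrative sketch): write a control register and poll a
 * status register through the ordered accessors above.  The register
 * offsets and bit names are hypothetical; "regs" is a pointer obtained
 * from map_physmem() or a machine-specific base address.
 *
 *	#define CTRL_ENABLE	0x1
 *	#define STAT_READY	0x2
 *
 *	writel(CTRL_ENABLE, regs + 0x00);
 *	while (!(readl(regs + 0x04) & STAT_READY))
 *		;
 */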

/*
 * The compiler seems to be incapable of optimising constants
 * properly.  Spell it out to the compiler in some cases.
 * These are only valid for small values of "off" (< 1<<12)
 */
#define __raw_base_writeb(val,base,off)	__arch_base_putb(val,base,off)
#define __raw_base_writew(val,base,off)	__arch_base_putw(val,base,off)
#define __raw_base_writel(val,base,off)	__arch_base_putl(val,base,off)

#define __raw_base_readb(base,off)	__arch_base_getb(base,off)
#define __raw_base_readw(base,off)	__arch_base_getw(base,off)
#define __raw_base_readl(base,off)	__arch_base_getl(base,off)

/*
 * Clear and set bits in one shot.  These macros can be used to clear and
 * set multiple bits in a register using a single call.  These macros can
 * also be used to set a multiple-bit bit pattern using a mask, by
 * specifying the mask in the 'clear' parameter and the new bit pattern
 * in the 'set' parameter.
 */

#define out_arch(type,endian,a,v)	__raw_write##type(cpu_to_##endian(v),a)
#define in_arch(type,endian,a)		endian##_to_cpu(__raw_read##type(a))

#define out_le64(a,v)	out_arch(q,le64,a,v)
#define out_le32(a,v)	out_arch(l,le32,a,v)
#define out_le16(a,v)	out_arch(w,le16,a,v)

#define in_le64(a)	in_arch(q,le64,a)
#define in_le32(a)	in_arch(l,le32,a)
#define in_le16(a)	in_arch(w,le16,a)

#define out_be32(a,v)	out_arch(l,be32,a,v)
#define out_be16(a,v)	out_arch(w,be16,a,v)

#define in_be32(a)	in_arch(l,be32,a)
#define in_be16(a)	in_arch(w,be16,a)

#define out_8(a,v)	__raw_writeb(v,a)
#define in_8(a)		__raw_readb(a)

#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
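
/*
 * Usage example (illustrative sketch): update a multi-bit field of a
 * little-endian 32-bit register in a single read-modify-write.  The
 * register offset, mask and value below are hypothetical.
 *
 *	// clear bits [7:4], then program the field to 3
 *	clrsetbits_le32(regs + 0x10, 0xf0, 0x30);
 */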

/*
 * Now, pick up the machine-defined IO definitions
 */
#if 0	/* XXX###XXX */
#include <asm/arch/io.h>
#endif	/* XXX###XXX */

/*
 *  IO port access primitives
 *  -------------------------
 *
 * The ARM doesn't have special IO access instructions; all IO is memory
 * mapped.  Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
#ifdef __io
#define outb(v,p)	__raw_writeb(v,__io(p))
#define outw(v,p)	__raw_writew(cpu_to_le16(v),__io(p))
#define outl(v,p)	__raw_writel(cpu_to_le32(v),__io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

#define outsb(p,d,l)	__raw_writesb(__io(p),d,l)
#define outsw(p,d,l)	__raw_writesw(__io(p),d,l)
#define outsl(p,d,l)	__raw_writesl(__io(p),d,l)

#define insb(p,d,l)	__raw_readsb(__io(p),d,l)
#define insw(p,d,l)	__raw_readsw(__io(p),d,l)
#define insl(p,d,l)	__raw_readsl(__io(p),d,l)
#endif

#define outb_p(val,port)	outb((val),(port))
#define outw_p(val,port)	outw((val),(port))
#define outl_p(val,port)	outl((val),(port))
#define inb_p(port)		inb((port))
#define inw_p(port)		inw((port))
#define inl_p(port)		inl((port))

#define outsb_p(port,from,len)	outsb(port,from,len)
#define outsw_p(port,from,len)	outsw(port,from,len)
#define outsl_p(port,from,len)	outsl(port,from,len)
#define insb_p(port,to,len)	insb(port,to,len)
#define insw_p(port,to,len)	insw(port,to,len)
#define insl_p(port,to,len)	insl(port,to,len)

#define writesl(a, d, s)	__raw_writesl((unsigned long)(a), d, s)
#define readsl(a, d, s)		__raw_readsl((unsigned long)(a), d, s)
#define writesw(a, d, s)	__raw_writesw((unsigned long)(a), d, s)
#define readsw(a, d, s)		__raw_readsw((unsigned long)(a), d, s)
#define writesb(a, d, s)	__raw_writesb((unsigned long)(a), d, s)
#define readsb(a, d, s)		__raw_readsb((unsigned long)(a), d, s)

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *vaddr, size_t size, int rw);

/*
 * String version of IO memory access ops:
 */
extern void _memcpy_fromio(void *, unsigned long, size_t);
extern void _memcpy_toio(unsigned long, const void *, size_t);
extern void _memset_io(unsigned long, int, size_t);

extern void __readwrite_bug(const char *fn);
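
/*
 * Usage example (illustrative sketch): drain a 16-bit data FIFO with the
 * string accessors, reading repeatedly from the same device address into
 * a memory buffer.  "fifo" (the FIFO register address) and the buffer
 * size are hypothetical.
 *
 *	u16 buf[64];
 *
 *	readsw(fifo, buf, 64);
 */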

/*
 * If this architecture has PCI memory IO, then define the read/write
 * macros.  These should only be used with the cookie passed from
 * ioremap.
 */
#ifdef __mem_pci

#define readb(c) ({ unsigned int __v = __raw_readb(__mem_pci(c)); __v; })
#define readw(c) ({ unsigned int __v = le16_to_cpu(__raw_readw(__mem_pci(c))); __v; })
#define readl(c) ({ unsigned int __v = le32_to_cpu(__raw_readl(__mem_pci(c))); __v; })

#define writeb(v,c)		__raw_writeb(v,__mem_pci(c))
#define writew(v,c)		__raw_writew(cpu_to_le16(v),__mem_pci(c))
#define writel(v,c)		__raw_writel(cpu_to_le32(v),__mem_pci(c))

#define memset_io(c,v,l)	_memset_io(__mem_pci(c),(v),(l))
#define memcpy_fromio(a,c,l)	_memcpy_fromio((a),__mem_pci(c),(l))
#define memcpy_toio(c,a,l)	_memcpy_toio(__mem_pci(c),(a),(l))

#define eth_io_copy_and_sum(s,c,l,b) \
	eth_copy_and_sum((s),__mem_pci(c),(l),(b))

static inline int
check_signature(unsigned long io_addr, const unsigned char *signature,
		int length)
{
	int retval = 0;

	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}

#else	/* !__mem_pci */

#define memset_io(a, b, c)	memset((void *)(a), (b), (c))
#define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))

#if !defined(readb)

#define readb(addr)			(__readwrite_bug("readb"),0)
#define readw(addr)			(__readwrite_bug("readw"),0)
#define readl(addr)			(__readwrite_bug("readl"),0)
#define writeb(v,addr)			__readwrite_bug("writeb")
#define writew(v,addr)			__readwrite_bug("writew")
#define writel(v,addr)			__readwrite_bug("writel")

#define eth_io_copy_and_sum(a,b,c,d)	__readwrite_bug("eth_io_copy_and_sum")

#define check_signature(io,sig,len)	(0)

#endif
#endif	/* __mem_pci */
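
/*
 * Usage example (illustrative sketch): probe for a device by comparing a
 * byte signature at a known offset of its mapped window.  The signature
 * and offset are hypothetical, and "base" is the cookie returned by the
 * mapping routine; check_signature() only performs real accesses when
 * __mem_pci is defined.
 *
 *	static const unsigned char sig[] = { 'Q', 'E', 'M', 'U' };
 *
 *	if (check_signature(base + 0x1c, sig, sizeof(sig)))
 *		puts("device found\n");
 */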

/*
 * If this architecture has ISA IO, then define the isa_read/isa_write
 * macros.
 */
#ifdef __mem_isa

#define isa_readb(addr)			__raw_readb(__mem_isa(addr))
#define isa_readw(addr)			__raw_readw(__mem_isa(addr))
#define isa_readl(addr)			__raw_readl(__mem_isa(addr))
#define isa_writeb(val,addr)		__raw_writeb(val,__mem_isa(addr))
#define isa_writew(val,addr)		__raw_writew(val,__mem_isa(addr))
#define isa_writel(val,addr)		__raw_writel(val,__mem_isa(addr))
#define isa_memset_io(a,b,c)		_memset_io(__mem_isa(a),(b),(c))
#define isa_memcpy_fromio(a,b,c)	_memcpy_fromio((a),__mem_isa(b),(c))
#define isa_memcpy_toio(a,b,c)		_memcpy_toio(__mem_isa((a)),(b),(c))

#define isa_eth_io_copy_and_sum(a,b,c,d) \
	eth_copy_and_sum((a),__mem_isa(b),(c),(d))

static inline int
isa_check_signature(unsigned long io_addr, const unsigned char *signature,
		    int length)
{
	int retval = 0;

	do {
		if (isa_readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}

#else	/* __mem_isa */

#define isa_readb(addr)			(__readwrite_bug("isa_readb"),0)
#define isa_readw(addr)			(__readwrite_bug("isa_readw"),0)
#define isa_readl(addr)			(__readwrite_bug("isa_readl"),0)
#define isa_writeb(val,addr)		__readwrite_bug("isa_writeb")
#define isa_writew(val,addr)		__readwrite_bug("isa_writew")
#define isa_writel(val,addr)		__readwrite_bug("isa_writel")
#define isa_memset_io(a,b,c)		__readwrite_bug("isa_memset_io")
#define isa_memcpy_fromio(a,b,c)	__readwrite_bug("isa_memcpy_fromio")
#define isa_memcpy_toio(a,b,c)		__readwrite_bug("isa_memcpy_toio")

#define isa_eth_io_copy_and_sum(a,b,c,d) \
	__readwrite_bug("isa_eth_io_copy_and_sum")

#define isa_check_signature(io,sig,len)	(0)

#endif	/* __mem_isa */
#endif	/* __KERNEL__ */

#include <iotrace.h>

#endif	/* __ASM_ARM_IO_H */