/*
 * linux/include/asm-arm/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Modifications:
 *  16-Sep-1996	RMK	Inlined the inx/outx functions & optimised for both
 *			constant addresses and variable addresses.
 *  04-Dec-1997	RMK	Moved a lot of this stuff to the new architecture
 *			specific IO header files.
 *  27-Mar-1999	PJB	Second parameter of memcpy_toio is const..
 *  04-Apr-1999	PJB	Added check_signature.
 *  12-Dec-1999	RMK	More cleanups
 *  18-Jun-2000	RMK	Removed virt_to_* and friends definitions
 */
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#if 0	/* XXX###XXX */
#include <asm/arch/hardware.h>
#endif	/* XXX###XXX */

static inline void sync(void)
{
}

/*
 * Given a physical address and a length, return a virtual address
 * that can be used to access the memory range with the caching
 * properties specified by "flags".
 */
#define MAP_NOCACHE	(0)
#define MAP_WRCOMBINE	(0)
#define MAP_WRBACK	(0)
#define MAP_WRTHROUGH	(0)

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	return (void *)((unsigned long)paddr);
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}

static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)((unsigned long)vaddr);
}

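/*
 * Example usage of map_physmem()/unmap_physmem() (an illustrative sketch;
 * the physical address, length and peripheral are hypothetical, and on this
 * architecture the MAP_* flags are effectively no-ops because the mapping
 * is a simple identity cast):
 *
 *	void *regs = map_physmem(0x48000000, 0x1000, MAP_NOCACHE);
 *	u32 id = *(volatile u32 *)regs;
 *	unmap_physmem(regs, MAP_NOCACHE);
 */
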
/*
 * Generic virtual read/write.  Byte, half-word, word and quad-word
 * accessors are all provided here as plain volatile pointer
 * dereferences.
 */
#define __arch_getb(a)			(*(volatile unsigned char *)(a))
#define __arch_getw(a)			(*(volatile unsigned short *)(a))
#define __arch_getl(a)			(*(volatile unsigned int *)(a))
#define __arch_getq(a)			(*(volatile unsigned long long *)(a))

#define __arch_putb(v,a)		(*(volatile unsigned char *)(a) = (v))
#define __arch_putw(v,a)		(*(volatile unsigned short *)(a) = (v))
#define __arch_putl(v,a)		(*(volatile unsigned int *)(a) = (v))
#define __arch_putq(v,a)		(*(volatile unsigned long long *)(a) = (v))

static inline void __raw_writesb(unsigned long addr, const void *data,
				 int bytelen)
{
	uint8_t *buf = (uint8_t *)data;

	while (bytelen--)
		__arch_putb(*buf++, addr);
}

static inline void __raw_writesw(unsigned long addr, const void *data,
				 int wordlen)
{
	uint16_t *buf = (uint16_t *)data;

	while (wordlen--)
		__arch_putw(*buf++, addr);
}

static inline void __raw_writesl(unsigned long addr, const void *data,
				 int longlen)
{
	uint32_t *buf = (uint32_t *)data;

	while (longlen--)
		__arch_putl(*buf++, addr);
}

static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
{
	uint8_t *buf = (uint8_t *)data;

	while (bytelen--)
		*buf++ = __arch_getb(addr);
}

static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
{
	uint16_t *buf = (uint16_t *)data;

	while (wordlen--)
		*buf++ = __arch_getw(addr);
}

static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
{
	uint32_t *buf = (uint32_t *)data;

	while (longlen--)
		*buf++ = __arch_getl(addr);
}

#define __raw_writeb(v,a)	__arch_putb(v,a)
#define __raw_writew(v,a)	__arch_putw(v,a)
#define __raw_writel(v,a)	__arch_putl(v,a)
#define __raw_writeq(v,a)	__arch_putq(v,a)

#define __raw_readb(a)		__arch_getb(a)
#define __raw_readw(a)		__arch_getw(a)
#define __raw_readl(a)		__arch_getl(a)
#define __raw_readq(a)		__arch_getq(a)

/*
 * TODO: The kernel offers some more advanced versions of barriers; it might
 * be advantageous to use them instead of the simple ones here.
 */
#define mb()		asm volatile("dsb sy" : : : "memory")
#define dmb()		__asm__ __volatile__ ("" : : : "memory")
#define __iormb()	dmb()
#define __iowmb()	dmb()

#define writeb(v,c)	({ u8  __v = v; __iowmb(); __arch_putb(__v,c); __v; })
#define writew(v,c)	({ u16 __v = v; __iowmb(); __arch_putw(__v,c); __v; })
#define writel(v,c)	({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; })
#define writeq(v,c)	({ u64 __v = v; __iowmb(); __arch_putq(__v,c); __v; })

#define readb(c)	({ u8  __v = __arch_getb(c); __iormb(); __v; })
#define readw(c)	({ u16 __v = __arch_getw(c); __iormb(); __v; })
#define readl(c)	({ u32 __v = __arch_getl(c); __iormb(); __v; })
#define readq(c)	({ u64 __v = __arch_getq(c); __iormb(); __v; })

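/*
 * Example usage of the MMIO accessors above (an illustrative sketch; the
 * register addresses and the status bit are hypothetical).  The loop polls
 * a status register until a transmit FIFO drains, then writes a data
 * register:
 *
 *	#define UART_STAT	0x49020014
 *	#define UART_TXFIFO	0x49020018
 *	#define TX_EMPTY	(1 << 6)
 *
 *	while (!(readl(UART_STAT) & TX_EMPTY))
 *		;
 *	writel(0x55, UART_TXFIFO);
 *
 * readl()/writel() include the __iormb()/__iowmb() barriers; the __raw_*
 * variants do not provide that ordering guarantee.
 */
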
/*
 * The compiler seems to be incapable of optimising constants
 * properly.  Spell it out to the compiler in some cases.
 * These are only valid for small values of "off" (< 1<<12)
 */
#define __raw_base_writeb(val,base,off)	__arch_base_putb(val,base,off)
#define __raw_base_writew(val,base,off)	__arch_base_putw(val,base,off)
#define __raw_base_writel(val,base,off)	__arch_base_putl(val,base,off)

#define __raw_base_readb(base,off)	__arch_base_getb(base,off)
#define __raw_base_readw(base,off)	__arch_base_getw(base,off)
#define __raw_base_readl(base,off)	__arch_base_getl(base,off)

/*
 * Clear and set bits in one shot.  These macros can be used to clear and
 * set multiple bits in a register using a single call.  They can also be
 * used to program a multi-bit field using a mask, by specifying the mask
 * in the 'clear' parameter and the new bit pattern in the 'set' parameter.
 */

#define out_arch(type,endian,a,v)	__raw_write##type(cpu_to_##endian(v),a)
#define in_arch(type,endian,a)		endian##_to_cpu(__raw_read##type(a))

#define out_le64(a,v)	out_arch(q,le64,a,v)
#define out_le32(a,v)	out_arch(l,le32,a,v)
#define out_le16(a,v)	out_arch(w,le16,a,v)

#define in_le64(a)	in_arch(q,le64,a)
#define in_le32(a)	in_arch(l,le32,a)
#define in_le16(a)	in_arch(w,le16,a)

#define out_be32(a,v)	out_arch(l,be32,a,v)
#define out_be16(a,v)	out_arch(w,be16,a,v)

#define in_be32(a)	in_arch(l,be32,a)
#define in_be16(a)	in_arch(w,be16,a)

#define out_8(a,v)	__raw_writeb(v,a)
#define in_8(a)		__raw_readb(a)

#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)

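/*
 * Example usage of the bit-manipulation helpers above (an illustrative
 * sketch; the register address and field layout are hypothetical).  The
 * first call clears a 4-bit divider field at bits [7:4] and programs it
 * to 5 in a single read-modify-write; the others set and clear single
 * flag bits in a little-endian register:
 *
 *	clrsetbits_le32(0x4a009120, 0xf << 4, 5 << 4);
 *	setbits_le32(0x4a009120, 1 << 0);
 *	clrbits_le32(0x4a009120, 1 << 1);
 */
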
/*
 * Now, pick up the machine-defined IO definitions
 */
#if 0	/* XXX###XXX */
#include <asm/arch/io.h>
#endif	/* XXX###XXX */

/*
 *  IO port access primitives
 *  -------------------------
 *
 * The ARM doesn't have special IO access instructions; all IO is memory
 * mapped.  Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
#ifdef __io
#define outb(v,p)		__raw_writeb(v,__io(p))
#define outw(v,p)		__raw_writew(cpu_to_le16(v),__io(p))
#define outl(v,p)		__raw_writel(cpu_to_le32(v),__io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

#define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
#define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
#define outsl(p,d,l)		__raw_writesl(__io(p),d,l)

#define insb(p,d,l)		__raw_readsb(__io(p),d,l)
#define insw(p,d,l)		__raw_readsw(__io(p),d,l)
#define insl(p,d,l)		__raw_readsl(__io(p),d,l)
#endif

#define outb_p(val,port)	outb((val),(port))
#define outw_p(val,port)	outw((val),(port))
#define outl_p(val,port)	outl((val),(port))
#define inb_p(port)		inb((port))
#define inw_p(port)		inw((port))
#define inl_p(port)		inl((port))

#define outsb_p(port,from,len)	outsb(port,from,len)
#define outsw_p(port,from,len)	outsw(port,from,len)
#define outsl_p(port,from,len)	outsl(port,from,len)
#define insb_p(port,to,len)	insb(port,to,len)
#define insw_p(port,to,len)	insw(port,to,len)
#define insl_p(port,to,len)	insl(port,to,len)

/*
 * ioremap and friends.
 *
 * ioremap takes a PCI memory address, as specified in
 * linux/Documentation/IO-mapping.txt.  If you want a
 * physical address, use __ioremap instead.
 */
extern void *__ioremap(unsigned long offset, size_t size, unsigned long flags);
extern void __iounmap(void *addr);

/*
 * Generic ioremap support.
 *
 * Define:
 *  iomem_valid_addr(off,size)
 *  iomem_to_phys(off)
 */
#ifdef iomem_valid_addr
#define __arch_ioremap(off,sz,nocache)					\
({									\
	unsigned long _off = (off), _size = (sz);			\
	void *_ret = (void *)0;						\
	if (iomem_valid_addr(_off, _size))				\
		_ret = __ioremap(iomem_to_phys(_off),_size,nocache);	\
	_ret;								\
})

#define __arch_iounmap __iounmap
#endif

#define ioremap(off,sz)			__arch_ioremap((off),(sz),0)
#define ioremap_nocache(off,sz)		__arch_ioremap((off),(sz),1)
#define iounmap(_addr)			__arch_iounmap(_addr)

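/*
 * Example usage of ioremap()/iounmap() (an illustrative sketch; it only
 * applies when the platform io.h supplies iomem_valid_addr()/iomem_to_phys()
 * so that __arch_ioremap() above is defined, and the bus address and size
 * are hypothetical):
 *
 *	void *base = ioremap(0x80000000, 0x1000);
 *	if (base) {
 *		u32 id = readl(base);
 *		iounmap(base);
 *	}
 */
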
/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *vaddr, size_t size, int rw);

/*
 * String version of IO memory access ops:
 */
extern void _memcpy_fromio(void *, unsigned long, size_t);
extern void _memcpy_toio(unsigned long, const void *, size_t);
extern void _memset_io(unsigned long, int, size_t);

extern void __readwrite_bug(const char *fn);

/*
 * If this architecture has PCI memory IO, then define the read/write
 * macros.  These should only be used with the cookie passed from
 * ioremap.
 */
#ifdef __mem_pci

#define readb(c) ({ unsigned int __v = __raw_readb(__mem_pci(c)); __v; })
#define readw(c) ({ unsigned int __v = le16_to_cpu(__raw_readw(__mem_pci(c))); __v; })
#define readl(c) ({ unsigned int __v = le32_to_cpu(__raw_readl(__mem_pci(c))); __v; })

#define writeb(v,c)		__raw_writeb(v,__mem_pci(c))
#define writew(v,c)		__raw_writew(cpu_to_le16(v),__mem_pci(c))
#define writel(v,c)		__raw_writel(cpu_to_le32(v),__mem_pci(c))

#define memset_io(c,v,l)	_memset_io(__mem_pci(c),(v),(l))
#define memcpy_fromio(a,c,l)	_memcpy_fromio((a),__mem_pci(c),(l))
#define memcpy_toio(c,a,l)	_memcpy_toio(__mem_pci(c),(a),(l))

#define eth_io_copy_and_sum(s,c,l,b) \
	eth_copy_and_sum((s),__mem_pci(c),(l),(b))

static inline int
check_signature(unsigned long io_addr, const unsigned char *signature,
		int length)
{
	int retval = 0;

	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}

#else	/* __mem_pci */

#define memset_io(a, b, c)	memset((void *)(a), (b), (c))
#define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))

#if !defined(readb)

#define readb(addr)		(__readwrite_bug("readb"), 0)
#define readw(addr)		(__readwrite_bug("readw"), 0)
#define readl(addr)		(__readwrite_bug("readl"), 0)
#define writeb(v,addr)		__readwrite_bug("writeb")
#define writew(v,addr)		__readwrite_bug("writew")
#define writel(v,addr)		__readwrite_bug("writel")

#define eth_io_copy_and_sum(a,b,c,d)	__readwrite_bug("eth_io_copy_and_sum")

#define check_signature(io,sig,len)	(0)

#endif
#endif	/* __mem_pci */

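/*
 * Example usage of check_signature() (an illustrative sketch; the ROM
 * address and signature bytes are hypothetical, and a working version is
 * only available when the platform defines __mem_pci):
 *
 *	static const unsigned char sig[] = { 'Q', 'E', 'M', 'U' };
 *	unsigned long rom_base = 0xd0000000;
 *
 *	if (check_signature(rom_base, sig, sizeof(sig)))
 *		printf("expansion ROM signature found\n");
 */
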
/*
 * If this architecture has ISA IO, then define the isa_read/isa_write
 * macros.
 */
#ifdef __mem_isa

#define isa_readb(addr)			__raw_readb(__mem_isa(addr))
#define isa_readw(addr)			__raw_readw(__mem_isa(addr))
#define isa_readl(addr)			__raw_readl(__mem_isa(addr))
#define isa_writeb(val,addr)		__raw_writeb(val,__mem_isa(addr))
#define isa_writew(val,addr)		__raw_writew(val,__mem_isa(addr))
#define isa_writel(val,addr)		__raw_writel(val,__mem_isa(addr))
#define isa_memset_io(a,b,c)		_memset_io(__mem_isa(a),(b),(c))
#define isa_memcpy_fromio(a,b,c)	_memcpy_fromio((a),__mem_isa(b),(c))
#define isa_memcpy_toio(a,b,c)		_memcpy_toio(__mem_isa((a)),(b),(c))

#define isa_eth_io_copy_and_sum(a,b,c,d) \
	eth_copy_and_sum((a),__mem_isa(b),(c),(d))

static inline int
isa_check_signature(unsigned long io_addr, const unsigned char *signature,
		    int length)
{
	int retval = 0;

	do {
		if (isa_readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}

#else	/* __mem_isa */

#define isa_readb(addr)			(__readwrite_bug("isa_readb"), 0)
#define isa_readw(addr)			(__readwrite_bug("isa_readw"), 0)
#define isa_readl(addr)			(__readwrite_bug("isa_readl"), 0)
#define isa_writeb(val,addr)		__readwrite_bug("isa_writeb")
#define isa_writew(val,addr)		__readwrite_bug("isa_writew")
#define isa_writel(val,addr)		__readwrite_bug("isa_writel")
#define isa_memset_io(a,b,c)		__readwrite_bug("isa_memset_io")
#define isa_memcpy_fromio(a,b,c)	__readwrite_bug("isa_memcpy_fromio")
#define isa_memcpy_toio(a,b,c)		__readwrite_bug("isa_memcpy_toio")

#define isa_eth_io_copy_and_sum(a,b,c,d) \
	__readwrite_bug("isa_eth_io_copy_and_sum")

#define isa_check_signature(io,sig,len)	(0)

#endif	/* __mem_isa */
#endif	/* __KERNEL__ */

#include <iotrace.h>

#endif	/* __ASM_ARM_IO_H */