#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H

/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) avoid writing the same thing over and over
 * again with slight variations and possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#include <asm/unaligned.h>

/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)

/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports,
 * but large machines may have multiple other I/O spaces, so we can't place any
 * a priori limit on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	8
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))

#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;

# ifdef __KERNEL__

/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
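/*
 * Worked example (values chosen purely for illustration): with
 * MAX_IO_SPACES_BITS = 8 and IO_SPACE_BITS = 24, PIO_OFFSET is
 * 1UL << 32 = 0x100000000.  A legacy port 0x3f8 in I/O space 2 has
 * port number IO_SPACE_BASE(2) | 0x3f8 = 0x20003f8, so its PIO cookie
 * is 0x1020003f8 -- well below PIO_RESERVED and clearly distinguishable
 * from a region-6 MMIO cookie returned by ioremap().
 */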
#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>

/*
 * Convert virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);

/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys

# endif /* __KERNEL__ */

/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()

/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}

static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}
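/*
 * For example (illustrative values only): a port number of 0x2000170
 * selects io_space[2] (IO_SPACE_NR) and a per-space port of 0x170
 * (IO_SPACE_PORT).  If that space is sparse, the byte offset becomes
 * IO_SPACE_SPARSE_ENCODING(0x170) = (0x5c << 12) | 0x170 = 0x5c170;
 * otherwise it is simply 0x170.  Either way the offset is OR'd into
 * io_space[2].mmio_base to form the uncached MMIO address.
 */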
#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb

/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		put_unaligned(platform_inw(port), dp++);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		put_unaligned(platform_inl(port), dp++);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(get_unaligned(sp++), port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(get_unaligned(sp++), port);
}

/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
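/*
 * Typical port I/O usage from a driver (sketch only; the port base and
 * register offsets below are made up for illustration):
 *
 *	u16 buf[64];
 *	u8 status = inb(dev_port_base + STATUS_REG);
 *	if (status & DATA_READY)
 *		insw(dev_port_base + DATA_REG, buf, ARRAY_SIZE(buf));
 *	outb(ACK_CMD, dev_port_base + CMD_REG);
 *
 * On ia64 each of these turns into a load/store through the uncached
 * mapping built by __ia64_mk_io_addr() (or a platform override via the
 * machine vector), followed by "mf.a" so the access completes before
 * any subsequent I/O access.
 */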
/*
 * The addresses passed to these functions are already ioremap()ed.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (even though PCI drivers and the spec imply
 * that they should).  Writes are OK though for all existing ia64 platforms
 * (and hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}

#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define writeb_relaxed(v,a)	__writeb((v), (a))
#define writew_relaxed(v,a)	__writew((v), (a))
#define writel_relaxed(v,a)	__writel((v), (a))
#define writeq_relaxed(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq

#ifndef inb_p
# define inb_p		inb
#endif
#ifndef inw_p
# define inw_p		inw
#endif
#ifndef inl_p
# define inl_p		inl
#endif

#ifndef outb_p
# define outb_p		outb
#endif
#ifndef outw_p
# define outw_p		outw
#endif
#ifndef outl_p
# define outl_p		outl
#endif

# ifdef __KERNEL__

extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
#define early_memremap(phys_addr, size)		early_ioremap(phys_addr, size)
extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
#define early_memunmap(addr, size)		early_iounmap(addr, size)

static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
{
	return ioremap(phys_addr, size);
}
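/*
 * Typical MMIO usage (sketch only; the BAR index, mapping length, and
 * register offsets are made up for illustration):
 *
 *	void __iomem *regs = ioremap(pci_resource_start(pdev, 0), 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	status = readl(regs + STATUS_REG);
 *	...
 *	iounmap(regs);
 *
 * writel() resolves to __writel() above; readl() goes through
 * platform_readl so that platforms which don't provide DMA coherence
 * via PIO reads can override it.
 */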

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);

# endif /* __KERNEL__ */

#endif /* _ASM_IA64_IO_H */