1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */ 27f30491cSTony Luck #ifndef _ASM_IA64_IO_H 37f30491cSTony Luck #define _ASM_IA64_IO_H 47f30491cSTony Luck 57f30491cSTony Luck /* 67f30491cSTony Luck * This file contains the definitions for the emulated IO instructions 77f30491cSTony Luck * inb/inw/inl/outb/outw/outl and the "string versions" of the same 87f30491cSTony Luck * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" 97f30491cSTony Luck * versions of the single-IO instructions (inb_p/inw_p/..). 107f30491cSTony Luck * 117f30491cSTony Luck * This file is not meant to be obfuscating: it's just complicated to 127f30491cSTony Luck * (a) handle it all in a way that makes gcc able to optimize it as 137f30491cSTony Luck * well as possible and (b) trying to avoid writing the same thing 147f30491cSTony Luck * over and over again with slight variations and possibly making a 157f30491cSTony Luck * mistake somewhere. 167f30491cSTony Luck * 177f30491cSTony Luck * Copyright (C) 1998-2003 Hewlett-Packard Co 187f30491cSTony Luck * David Mosberger-Tang <davidm@hpl.hp.com> 197f30491cSTony Luck * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> 207f30491cSTony Luck * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> 217f30491cSTony Luck */ 227f30491cSTony Luck 238a549f8bSJames Bottomley #include <asm/unaligned.h> 2480926770SArd Biesheuvel #include <asm/early_ioremap.h> 258a549f8bSJames Bottomley 267f30491cSTony Luck /* We don't use IO slowdowns on the ia64, but.. 
*/ 277f30491cSTony Luck #define __SLOW_DOWN_IO do { } while (0) 287f30491cSTony Luck #define SLOW_DOWN_IO do { } while (0) 297f30491cSTony Luck 307f30491cSTony Luck #define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED) 317f30491cSTony Luck 327f30491cSTony Luck /* 337f30491cSTony Luck * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but 347f30491cSTony Luck * large machines may have multiple other I/O spaces so we can't place any a priori limit 357f30491cSTony Luck * on IO_SPACE_LIMIT. These additional spaces are described in ACPI. 367f30491cSTony Luck */ 377f30491cSTony Luck #define IO_SPACE_LIMIT 0xffffffffffffffffUL 387f30491cSTony Luck 397f30491cSTony Luck #define MAX_IO_SPACES_BITS 8 407f30491cSTony Luck #define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS) 417f30491cSTony Luck #define IO_SPACE_BITS 24 427f30491cSTony Luck #define IO_SPACE_SIZE (1UL << IO_SPACE_BITS) 437f30491cSTony Luck 447f30491cSTony Luck #define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS) 457f30491cSTony Luck #define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS) 467f30491cSTony Luck #define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1)) 477f30491cSTony Luck 487f30491cSTony Luck #define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff)) 497f30491cSTony Luck 507f30491cSTony Luck struct io_space { 517f30491cSTony Luck unsigned long mmio_base; /* base in MMIO space */ 527f30491cSTony Luck int sparse; 537f30491cSTony Luck }; 547f30491cSTony Luck 557f30491cSTony Luck extern struct io_space io_space[]; 567f30491cSTony Luck extern unsigned int num_io_spaces; 577f30491cSTony Luck 587f30491cSTony Luck # ifdef __KERNEL__ 597f30491cSTony Luck 607f30491cSTony Luck /* 617f30491cSTony Luck * All MMIO iomem cookies are in region 6; anything less is a PIO cookie: 627f30491cSTony Luck * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap) 637f30491cSTony Luck * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port) 647f30491cSTony Luck * 657f30491cSTony 
Luck * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch 667f30491cSTony Luck * code that uses bare port numbers without the prerequisite pci_iomap(). 677f30491cSTony Luck */ 687f30491cSTony Luck #define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS)) 697f30491cSTony Luck #define PIO_MASK (PIO_OFFSET - 1) 707f30491cSTony Luck #define PIO_RESERVED __IA64_UNCACHED_OFFSET 717f30491cSTony Luck #define HAVE_ARCH_PIO_SIZE 727f30491cSTony Luck 737f30491cSTony Luck #include <asm/intrinsics.h> 747f30491cSTony Luck #include <asm/machvec.h> 757f30491cSTony Luck #include <asm/page.h> 767f30491cSTony Luck #include <asm-generic/iomap.h> 777f30491cSTony Luck 787f30491cSTony Luck /* 797f30491cSTony Luck * Change virtual addresses to physical addresses and vv. 807f30491cSTony Luck */ 817f30491cSTony Luck static inline unsigned long 827f30491cSTony Luck virt_to_phys (volatile void *address) 837f30491cSTony Luck { 847f30491cSTony Luck return (unsigned long) address - PAGE_OFFSET; 857f30491cSTony Luck } 860bbf47eaSArnd Bergmann #define virt_to_phys virt_to_phys 877f30491cSTony Luck 887f30491cSTony Luck static inline void* 897f30491cSTony Luck phys_to_virt (unsigned long address) 907f30491cSTony Luck { 917f30491cSTony Luck return (void *) (address + PAGE_OFFSET); 927f30491cSTony Luck } 930bbf47eaSArnd Bergmann #define phys_to_virt phys_to_virt 947f30491cSTony Luck 957f30491cSTony Luck #define ARCH_HAS_VALID_PHYS_ADDR_RANGE 967f30491cSTony Luck extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size); 977e6735c3SCyril Chemparathy extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */ 987f30491cSTony Luck extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count); 997f30491cSTony Luck 1007f30491cSTony Luck /* 1017f30491cSTony Luck * The following two macros are deprecated and scheduled for removal. 1027f30491cSTony Luck * Please use the PCI-DMA interface defined in <asm/pci.h> instead. 
1037f30491cSTony Luck */ 1047f30491cSTony Luck #define bus_to_virt phys_to_virt 1057f30491cSTony Luck #define virt_to_bus virt_to_phys 1067f30491cSTony Luck #define page_to_bus page_to_phys 1077f30491cSTony Luck 1087f30491cSTony Luck # endif /* KERNEL */ 1097f30491cSTony Luck 1107f30491cSTony Luck /* 1117f30491cSTony Luck * Memory fence w/accept. This should never be used in code that is 1127f30491cSTony Luck * not IA-64 specific. 1137f30491cSTony Luck */ 1147f30491cSTony Luck #define __ia64_mf_a() ia64_mfa() 1157f30491cSTony Luck 1167f30491cSTony Luck /** 1177f30491cSTony Luck * ___ia64_mmiowb - I/O write barrier 1187f30491cSTony Luck * 1197f30491cSTony Luck * Ensure ordering of I/O space writes. This will make sure that writes 1207f30491cSTony Luck * following the barrier will arrive after all previous writes. For most 1217f30491cSTony Luck * ia64 platforms, this is a simple 'mf.a' instruction. 1227f30491cSTony Luck * 1232a762130SMauro Carvalho Chehab * See Documentation/driver-api/device-io.rst for more information. 
1247f30491cSTony Luck */ 1257f30491cSTony Luck static inline void ___ia64_mmiowb(void) 1267f30491cSTony Luck { 1277f30491cSTony Luck ia64_mfa(); 1287f30491cSTony Luck } 1297f30491cSTony Luck 1307f30491cSTony Luck static inline void* 1317f30491cSTony Luck __ia64_mk_io_addr (unsigned long port) 1327f30491cSTony Luck { 1337f30491cSTony Luck struct io_space *space; 1347f30491cSTony Luck unsigned long offset; 1357f30491cSTony Luck 1367f30491cSTony Luck space = &io_space[IO_SPACE_NR(port)]; 1377f30491cSTony Luck port = IO_SPACE_PORT(port); 1387f30491cSTony Luck if (space->sparse) 1397f30491cSTony Luck offset = IO_SPACE_SPARSE_ENCODING(port); 1407f30491cSTony Luck else 1417f30491cSTony Luck offset = port; 1427f30491cSTony Luck 1437f30491cSTony Luck return (void *) (space->mmio_base | offset); 1447f30491cSTony Luck } 1457f30491cSTony Luck 1467f30491cSTony Luck #define __ia64_inb ___ia64_inb 1477f30491cSTony Luck #define __ia64_inw ___ia64_inw 1487f30491cSTony Luck #define __ia64_inl ___ia64_inl 1497f30491cSTony Luck #define __ia64_outb ___ia64_outb 1507f30491cSTony Luck #define __ia64_outw ___ia64_outw 1517f30491cSTony Luck #define __ia64_outl ___ia64_outl 1527f30491cSTony Luck #define __ia64_readb ___ia64_readb 1537f30491cSTony Luck #define __ia64_readw ___ia64_readw 1547f30491cSTony Luck #define __ia64_readl ___ia64_readl 1557f30491cSTony Luck #define __ia64_readq ___ia64_readq 1567f30491cSTony Luck #define __ia64_readb_relaxed ___ia64_readb 1577f30491cSTony Luck #define __ia64_readw_relaxed ___ia64_readw 1587f30491cSTony Luck #define __ia64_readl_relaxed ___ia64_readl 1597f30491cSTony Luck #define __ia64_readq_relaxed ___ia64_readq 1607f30491cSTony Luck #define __ia64_writeb ___ia64_writeb 1617f30491cSTony Luck #define __ia64_writew ___ia64_writew 1627f30491cSTony Luck #define __ia64_writel ___ia64_writel 1637f30491cSTony Luck #define __ia64_writeq ___ia64_writeq 1647f30491cSTony Luck #define __ia64_mmiowb ___ia64_mmiowb 1657f30491cSTony Luck 1667f30491cSTony Luck /* 
/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncachable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

/*
 * String variants: repeat the single-port access 'count' times, filling or
 * draining the memory buffer.  The 16/32-bit variants go through
 * put_unaligned()/get_unaligned() because callers may pass unaligned buffers.
 */
static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		put_unaligned(platform_inw(port), dp++);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		put_unaligned(platform_inl(port), dp++);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(get_unaligned(sp++), port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(get_unaligned(sp++), port);
}

/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
2927f30491cSTony Luck */ 2937f30491cSTony Luck #define __inb platform_inb 2947f30491cSTony Luck #define __inw platform_inw 2957f30491cSTony Luck #define __inl platform_inl 2967f30491cSTony Luck #define __outb platform_outb 2977f30491cSTony Luck #define __outw platform_outw 2987f30491cSTony Luck #define __outl platform_outl 2997f30491cSTony Luck #define __mmiowb platform_mmiowb 3007f30491cSTony Luck 3017f30491cSTony Luck #define inb(p) __inb(p) 3027f30491cSTony Luck #define inw(p) __inw(p) 3037f30491cSTony Luck #define inl(p) __inl(p) 3047f30491cSTony Luck #define insb(p,d,c) __insb(p,d,c) 3057f30491cSTony Luck #define insw(p,d,c) __insw(p,d,c) 3067f30491cSTony Luck #define insl(p,d,c) __insl(p,d,c) 3077f30491cSTony Luck #define outb(v,p) __outb(v,p) 3087f30491cSTony Luck #define outw(v,p) __outw(v,p) 3097f30491cSTony Luck #define outl(v,p) __outl(v,p) 3107f30491cSTony Luck #define outsb(p,s,c) __outsb(p,s,c) 3117f30491cSTony Luck #define outsw(p,s,c) __outsw(p,s,c) 3127f30491cSTony Luck #define outsl(p,s,c) __outsl(p,s,c) 3137f30491cSTony Luck #define mmiowb() __mmiowb() 3147f30491cSTony Luck 3157f30491cSTony Luck /* 3167f30491cSTony Luck * The address passed to these functions are ioremap()ped already. 3177f30491cSTony Luck * 3187f30491cSTony Luck * We need these to be machine vectors since some platforms don't provide 3197f30491cSTony Luck * DMA coherence via PIO reads (PCI drivers and the spec imply that this is 3207f30491cSTony Luck * a good idea). Writes are ok though for all existing ia64 platforms (and 3217f30491cSTony Luck * hopefully it'll stay that way). 
3227f30491cSTony Luck */ 3237f30491cSTony Luck static inline unsigned char 3247f30491cSTony Luck ___ia64_readb (const volatile void __iomem *addr) 3257f30491cSTony Luck { 3267f30491cSTony Luck return *(volatile unsigned char __force *)addr; 3277f30491cSTony Luck } 3287f30491cSTony Luck 3297f30491cSTony Luck static inline unsigned short 3307f30491cSTony Luck ___ia64_readw (const volatile void __iomem *addr) 3317f30491cSTony Luck { 3327f30491cSTony Luck return *(volatile unsigned short __force *)addr; 3337f30491cSTony Luck } 3347f30491cSTony Luck 3357f30491cSTony Luck static inline unsigned int 3367f30491cSTony Luck ___ia64_readl (const volatile void __iomem *addr) 3377f30491cSTony Luck { 3387f30491cSTony Luck return *(volatile unsigned int __force *) addr; 3397f30491cSTony Luck } 3407f30491cSTony Luck 3417f30491cSTony Luck static inline unsigned long 3427f30491cSTony Luck ___ia64_readq (const volatile void __iomem *addr) 3437f30491cSTony Luck { 3447f30491cSTony Luck return *(volatile unsigned long __force *) addr; 3457f30491cSTony Luck } 3467f30491cSTony Luck 3477f30491cSTony Luck static inline void 3487f30491cSTony Luck __writeb (unsigned char val, volatile void __iomem *addr) 3497f30491cSTony Luck { 3507f30491cSTony Luck *(volatile unsigned char __force *) addr = val; 3517f30491cSTony Luck } 3527f30491cSTony Luck 3537f30491cSTony Luck static inline void 3547f30491cSTony Luck __writew (unsigned short val, volatile void __iomem *addr) 3557f30491cSTony Luck { 3567f30491cSTony Luck *(volatile unsigned short __force *) addr = val; 3577f30491cSTony Luck } 3587f30491cSTony Luck 3597f30491cSTony Luck static inline void 3607f30491cSTony Luck __writel (unsigned int val, volatile void __iomem *addr) 3617f30491cSTony Luck { 3627f30491cSTony Luck *(volatile unsigned int __force *) addr = val; 3637f30491cSTony Luck } 3647f30491cSTony Luck 3657f30491cSTony Luck static inline void 3667f30491cSTony Luck __writeq (unsigned long val, volatile void __iomem *addr) 3677f30491cSTony 
Luck { 3687f30491cSTony Luck *(volatile unsigned long __force *) addr = val; 3697f30491cSTony Luck } 3707f30491cSTony Luck 3717f30491cSTony Luck #define __readb platform_readb 3727f30491cSTony Luck #define __readw platform_readw 3737f30491cSTony Luck #define __readl platform_readl 3747f30491cSTony Luck #define __readq platform_readq 3757f30491cSTony Luck #define __readb_relaxed platform_readb_relaxed 3767f30491cSTony Luck #define __readw_relaxed platform_readw_relaxed 3777f30491cSTony Luck #define __readl_relaxed platform_readl_relaxed 3787f30491cSTony Luck #define __readq_relaxed platform_readq_relaxed 3797f30491cSTony Luck 3807f30491cSTony Luck #define readb(a) __readb((a)) 3817f30491cSTony Luck #define readw(a) __readw((a)) 3827f30491cSTony Luck #define readl(a) __readl((a)) 3837f30491cSTony Luck #define readq(a) __readq((a)) 3847f30491cSTony Luck #define readb_relaxed(a) __readb_relaxed((a)) 3857f30491cSTony Luck #define readw_relaxed(a) __readw_relaxed((a)) 3867f30491cSTony Luck #define readl_relaxed(a) __readl_relaxed((a)) 3877f30491cSTony Luck #define readq_relaxed(a) __readq_relaxed((a)) 3887f30491cSTony Luck #define __raw_readb readb 3897f30491cSTony Luck #define __raw_readw readw 3907f30491cSTony Luck #define __raw_readl readl 3917f30491cSTony Luck #define __raw_readq readq 3927f30491cSTony Luck #define __raw_readb_relaxed readb_relaxed 3937f30491cSTony Luck #define __raw_readw_relaxed readw_relaxed 3947f30491cSTony Luck #define __raw_readl_relaxed readl_relaxed 3957f30491cSTony Luck #define __raw_readq_relaxed readq_relaxed 3967f30491cSTony Luck #define writeb(v,a) __writeb((v), (a)) 3977f30491cSTony Luck #define writew(v,a) __writew((v), (a)) 3987f30491cSTony Luck #define writel(v,a) __writel((v), (a)) 3997f30491cSTony Luck #define writeq(v,a) __writeq((v), (a)) 400f6b3b7a9SWill Deacon #define writeb_relaxed(v,a) __writeb((v), (a)) 401f6b3b7a9SWill Deacon #define writew_relaxed(v,a) __writew((v), (a)) 402f6b3b7a9SWill Deacon #define writel_relaxed(v,a) 
__writel((v), (a)) 403f6b3b7a9SWill Deacon #define writeq_relaxed(v,a) __writeq((v), (a)) 4047f30491cSTony Luck #define __raw_writeb writeb 4057f30491cSTony Luck #define __raw_writew writew 4067f30491cSTony Luck #define __raw_writel writel 4077f30491cSTony Luck #define __raw_writeq writeq 4087f30491cSTony Luck 4097f30491cSTony Luck #ifndef inb_p 4107f30491cSTony Luck # define inb_p inb 4117f30491cSTony Luck #endif 4127f30491cSTony Luck #ifndef inw_p 4137f30491cSTony Luck # define inw_p inw 4147f30491cSTony Luck #endif 4157f30491cSTony Luck #ifndef inl_p 4167f30491cSTony Luck # define inl_p inl 4177f30491cSTony Luck #endif 4187f30491cSTony Luck 4197f30491cSTony Luck #ifndef outb_p 4207f30491cSTony Luck # define outb_p outb 4217f30491cSTony Luck #endif 4227f30491cSTony Luck #ifndef outw_p 4237f30491cSTony Luck # define outw_p outw 4247f30491cSTony Luck #endif 4257f30491cSTony Luck #ifndef outl_p 4267f30491cSTony Luck # define outl_p outl 4277f30491cSTony Luck #endif 4287f30491cSTony Luck 4297f30491cSTony Luck # ifdef __KERNEL__ 4307f30491cSTony Luck 4317f30491cSTony Luck extern void __iomem * ioremap(unsigned long offset, unsigned long size); 4327f30491cSTony Luck extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); 4337f30491cSTony Luck extern void iounmap (volatile void __iomem *addr); 4346d5bbf00SLen Brown static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size) 4356d5bbf00SLen Brown { 4366d5bbf00SLen Brown return ioremap(phys_addr, size); 4376d5bbf00SLen Brown } 4380bbf47eaSArnd Bergmann #define ioremap ioremap 4390bbf47eaSArnd Bergmann #define ioremap_nocache ioremap_nocache 44092281deeSDan Williams #define ioremap_cache ioremap_cache 441b0f84ac3SLuis R. 
Rodriguez #define ioremap_uc ioremap_nocache 4420bbf47eaSArnd Bergmann #define iounmap iounmap 4437f30491cSTony Luck 4447f30491cSTony Luck /* 4457f30491cSTony Luck * String version of IO memory access ops: 4467f30491cSTony Luck */ 4477f30491cSTony Luck extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n); 4487f30491cSTony Luck extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n); 4497f30491cSTony Luck extern void memset_io(volatile void __iomem *s, int c, long n); 4507f30491cSTony Luck 4510bbf47eaSArnd Bergmann #define memcpy_fromio memcpy_fromio 4520bbf47eaSArnd Bergmann #define memcpy_toio memcpy_toio 4530bbf47eaSArnd Bergmann #define memset_io memset_io 4540bbf47eaSArnd Bergmann #define xlate_dev_kmem_ptr xlate_dev_kmem_ptr 4550bbf47eaSArnd Bergmann #define xlate_dev_mem_ptr xlate_dev_mem_ptr 4560bbf47eaSArnd Bergmann #include <asm-generic/io.h> 457cc26ebbeSTony Luck #undef PCI_IOBASE 4580bbf47eaSArnd Bergmann 4597f30491cSTony Luck # endif /* __KERNEL__ */ 4607f30491cSTony Luck 4617f30491cSTony Luck #endif /* _ASM_IA64_IO_H */ 462