1384740dcSRalf Baechle /*
2384740dcSRalf Baechle * This file is subject to the terms and conditions of the GNU General Public
3384740dcSRalf Baechle * License. See the file "COPYING" in the main directory of this archive
4384740dcSRalf Baechle * for more details.
5384740dcSRalf Baechle *
6384740dcSRalf Baechle * Copyright (C) 1994, 1995 Waldorf GmbH
7384740dcSRalf Baechle * Copyright (C) 1994 - 2000, 06 Ralf Baechle
8384740dcSRalf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9384740dcSRalf Baechle * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
10384740dcSRalf Baechle * Author: Maciej W. Rozycki <macro@mips.com>
11384740dcSRalf Baechle */
12384740dcSRalf Baechle #ifndef _ASM_IO_H
13384740dcSRalf Baechle #define _ASM_IO_H
14384740dcSRalf Baechle
15384740dcSRalf Baechle #include <linux/compiler.h>
16384740dcSRalf Baechle #include <linux/kernel.h>
17384740dcSRalf Baechle #include <linux/types.h>
1892d11594SJim Quinlan #include <linux/irqflags.h>
19384740dcSRalf Baechle
20384740dcSRalf Baechle #include <asm/addrspace.h>
214ae0452bSMaciej W. Rozycki #include <asm/barrier.h>
22893a0574SYoichi Yuasa #include <asm/bug.h>
23384740dcSRalf Baechle #include <asm/byteorder.h>
24384740dcSRalf Baechle #include <asm/cpu.h>
25384740dcSRalf Baechle #include <asm/cpu-features.h>
26384740dcSRalf Baechle #include <asm/page.h>
27384740dcSRalf Baechle #include <asm/pgtable-bits.h>
28384740dcSRalf Baechle #include <asm/processor.h>
29384740dcSRalf Baechle #include <asm/string.h>
30384740dcSRalf Baechle #include <mangle-port.h>
31384740dcSRalf Baechle
32384740dcSRalf Baechle /*
33384740dcSRalf Baechle * Raw operations are never swapped in software. OTOH values that raw
34384740dcSRalf Baechle * operations are working on may or may not have been swapped by the bus
35384740dcSRalf Baechle * hardware. An example use would be for flash memory that's used for
36384740dcSRalf Baechle * execute in place.
37384740dcSRalf Baechle */
38384740dcSRalf Baechle # define __raw_ioswabb(a, x) (x)
39384740dcSRalf Baechle # define __raw_ioswabw(a, x) (x)
40384740dcSRalf Baechle # define __raw_ioswabl(a, x) (x)
41384740dcSRalf Baechle # define __raw_ioswabq(a, x) (x)
42384740dcSRalf Baechle # define ____raw_ioswabq(a, x) (x)
43384740dcSRalf Baechle
448b656253SMaciej W. Rozycki # define __relaxed_ioswabb ioswabb
458b656253SMaciej W. Rozycki # define __relaxed_ioswabw ioswabw
468b656253SMaciej W. Rozycki # define __relaxed_ioswabl ioswabl
478b656253SMaciej W. Rozycki # define __relaxed_ioswabq ioswabq
488b656253SMaciej W. Rozycki
49384740dcSRalf Baechle /* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
50384740dcSRalf Baechle
51384740dcSRalf Baechle /*
52384740dcSRalf Baechle * On MIPS I/O ports are memory mapped, so we access them using normal
53384740dcSRalf Baechle * load/store instructions. mips_io_port_base is the virtual address to
54384740dcSRalf Baechle * which all ports are being mapped. For sake of efficiency some code
55384740dcSRalf Baechle * assumes that this is an address that can be loaded with a single lui
56384740dcSRalf Baechle * instruction, so the lower 16 bits must be zero. Should be true on
57f2790db1SRandy Dunlap * any sane architecture; generic code does not use this assumption.
58384740dcSRalf Baechle */
5912051b31SNick Desaulniers extern unsigned long mips_io_port_base;
60384740dcSRalf Baechle
/*
 * set_io_port_base - set the virtual base address for I/O port accesses
 * @base: virtual address to which all I/O ports are mapped
 *
 * Per the comment above mips_io_port_base, the lower 16 bits of @base
 * should be zero so the address can be loaded with a single lui.
 */
static inline void set_io_port_base(unsigned long base)
{
	mips_io_port_base = base;
}
65384740dcSRalf Baechle
66384740dcSRalf Baechle /*
67b962aeb0SPaul Burton * Provide the necessary definitions for generic iomap. We make use of
68b962aeb0SPaul Burton * mips_io_port_base for iomap(), but we don't reserve any low addresses for
69b962aeb0SPaul Burton * use with I/O ports.
70b962aeb0SPaul Burton */
714ae0452bSMaciej W. Rozycki
72b962aeb0SPaul Burton #define HAVE_ARCH_PIO_SIZE
73b962aeb0SPaul Burton #define PIO_OFFSET mips_io_port_base
74b962aeb0SPaul Burton #define PIO_MASK IO_SPACE_LIMIT
75b962aeb0SPaul Burton #define PIO_RESERVED 0x0UL
76b962aeb0SPaul Burton
77b962aeb0SPaul Burton /*
784ae0452bSMaciej W. Rozycki * Enforce in-order execution of data I/O. In the MIPS architecture
794ae0452bSMaciej W. Rozycki * these are equivalent to corresponding platform-specific memory
804ae0452bSMaciej W. Rozycki * barriers defined in <asm/barrier.h>. API pinched from PowerPC,
814ae0452bSMaciej W. Rozycki * with sync additionally defined.
824ae0452bSMaciej W. Rozycki */
834ae0452bSMaciej W. Rozycki #define iobarrier_rw() mb()
844ae0452bSMaciej W. Rozycki #define iobarrier_r() rmb()
854ae0452bSMaciej W. Rozycki #define iobarrier_w() wmb()
864ae0452bSMaciej W. Rozycki #define iobarrier_sync() iob()
874ae0452bSMaciej W. Rozycki
884ae0452bSMaciej W. Rozycki /*
89384740dcSRalf Baechle * virt_to_phys - map virtual addresses to physical
90384740dcSRalf Baechle * @address: address to remap
91384740dcSRalf Baechle *
92384740dcSRalf Baechle * The returned physical address is the physical (CPU) mapping for
93384740dcSRalf Baechle * the memory address given. It is only valid to use this function on
94384740dcSRalf Baechle * addresses directly mapped or allocated via kmalloc.
95384740dcSRalf Baechle *
96384740dcSRalf Baechle * This function does not give bus mappings for DMA transfers. In
97384740dcSRalf Baechle * almost all conceivable cases a device driver should not be using
98384740dcSRalf Baechle * this function
99384740dcSRalf Baechle */
/* Unchecked virtual-to-physical translation: a plain __pa() conversion. */
static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
{
	return __pa(address);
}
104384740dcSRalf Baechle
105dfad83cbSFlorian Fainelli #ifdef CONFIG_DEBUG_VIRTUAL
106dfad83cbSFlorian Fainelli extern phys_addr_t __virt_to_phys(volatile const void *x);
107dfad83cbSFlorian Fainelli #else
108dfad83cbSFlorian Fainelli #define __virt_to_phys(x) __virt_to_phys_nodebug(x)
109dfad83cbSFlorian Fainelli #endif
110dfad83cbSFlorian Fainelli
111dfad83cbSFlorian Fainelli #define virt_to_phys virt_to_phys
/*
 * See the comment block above for the contract: valid only for directly
 * mapped or kmalloc'ed addresses.  With CONFIG_DEBUG_VIRTUAL this routes
 * through the checking __virt_to_phys() implementation.
 */
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys(x);
}
116dfad83cbSFlorian Fainelli
117384740dcSRalf Baechle /*
118384740dcSRalf Baechle * phys_to_virt - map physical address to virtual
119384740dcSRalf Baechle * @address: address to remap
120384740dcSRalf Baechle *
121384740dcSRalf Baechle * The returned virtual address is a current CPU mapping for
122384740dcSRalf Baechle * the memory address given. It is only valid to use this function on
123384740dcSRalf Baechle * addresses that have a kernel mapping
124384740dcSRalf Baechle *
125384740dcSRalf Baechle * This function does not handle bus mappings for DMA transfers. In
126384740dcSRalf Baechle * almost all conceivable cases a device driver should not be using
127384740dcSRalf Baechle * this function
128384740dcSRalf Baechle */
/* Inverse of virt_to_phys(): a plain __va() conversion (see comment above). */
static inline void * phys_to_virt(unsigned long address)
{
	return __va(address);
}
133384740dcSRalf Baechle
134384740dcSRalf Baechle /*
135384740dcSRalf Baechle * ISA I/O bus memory addresses are 1:1 with the physical address.
136384740dcSRalf Baechle */
/* ISA bus addresses are 1:1 with physical addresses (see comment above). */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}
141384740dcSRalf Baechle
/* Inverse of isa_virt_to_bus(): ISA bus address back to kernel virtual. */
static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}
146384740dcSRalf Baechle
147384740dcSRalf Baechle /*
148384740dcSRalf Baechle * Change "struct page" to physical address.
149384740dcSRalf Baechle */
150384740dcSRalf Baechle #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
151384740dcSRalf Baechle
152d257b8feSChristoph Hellwig void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
153d257b8feSChristoph Hellwig unsigned long prot_val);
154d257b8feSChristoph Hellwig void iounmap(const volatile void __iomem *addr);
1558e487c15SChristoph Hellwig
156384740dcSRalf Baechle /*
157384740dcSRalf Baechle * ioremap - map bus memory into CPU space
158384740dcSRalf Baechle * @offset: bus address of the memory
159384740dcSRalf Baechle * @size: size of the resource to map
160384740dcSRalf Baechle *
161384740dcSRalf Baechle * ioremap performs a platform specific sequence of operations to
162384740dcSRalf Baechle * make bus memory CPU accessible via the readb/readw/readl/writeb/
163384740dcSRalf Baechle * writew/writel functions and the other mmio helpers. The returned
164384740dcSRalf Baechle * address is not guaranteed to be usable directly as a virtual
165384740dcSRalf Baechle * address.
166384740dcSRalf Baechle */
167384740dcSRalf Baechle #define ioremap(offset, size) \
1685c9ff570SChristoph Hellwig ioremap_prot((offset), (size), _CACHE_UNCACHED)
169d23cc635SChristoph Hellwig #define ioremap_uc ioremap
170384740dcSRalf Baechle
171384740dcSRalf Baechle /*
17260af0d94SChristoph Hellwig * ioremap_cache - map bus memory into CPU space
173384740dcSRalf Baechle * @offset: bus address of the memory
174384740dcSRalf Baechle * @size: size of the resource to map
175384740dcSRalf Baechle *
17660af0d94SChristoph Hellwig * ioremap_cache performs a platform specific sequence of operations to
177384740dcSRalf Baechle * make bus memory CPU accessible via the readb/readw/readl/writeb/
178384740dcSRalf Baechle * writew/writel functions and the other mmio helpers. The returned
179384740dcSRalf Baechle * address is not guaranteed to be usable directly as a virtual
180384740dcSRalf Baechle * address.
181384740dcSRalf Baechle *
182384740dcSRalf Baechle * This version of ioremap ensures that the memory is marked cachable by
183384740dcSRalf Baechle * the CPU. Also enables full write-combining. Useful for some
184384740dcSRalf Baechle * memory-like regions on I/O busses.
185384740dcSRalf Baechle */
18660af0d94SChristoph Hellwig #define ioremap_cache(offset, size) \
1875c9ff570SChristoph Hellwig ioremap_prot((offset), (size), _page_cachable_default)
188384740dcSRalf Baechle
189384740dcSRalf Baechle /*
1909748e33eSSerge Semin * ioremap_wc - map bus memory into CPU space
1919748e33eSSerge Semin * @offset: bus address of the memory
1929748e33eSSerge Semin * @size: size of the resource to map
1939748e33eSSerge Semin *
1949748e33eSSerge Semin * ioremap_wc performs a platform specific sequence of operations to
1959748e33eSSerge Semin * make bus memory CPU accessible via the readb/readw/readl/writeb/
1969748e33eSSerge Semin * writew/writel functions and the other mmio helpers. The returned
1979748e33eSSerge Semin * address is not guaranteed to be usable directly as a virtual
1989748e33eSSerge Semin * address.
1999748e33eSSerge Semin *
2009748e33eSSerge Semin * This version of ioremap ensures that the memory is marked uncachable
2019748e33eSSerge Semin * but accelerated by means of write-combining feature. It is specifically
 * useful for PCIe prefetchable windows, which may vastly improve
 * communication performance. If it is determined during boot that the
 * CPU CCA doesn't support UCA, this method falls back to the
 * _CACHE_UNCACHED option (see cpu_probe()).
2069748e33eSSerge Semin */
2079748e33eSSerge Semin #define ioremap_wc(offset, size) \
2085c9ff570SChristoph Hellwig ioremap_prot((offset), (size), boot_cpu_data.writecombine)
2099748e33eSSerge Semin
210*0b1f77e7SBaoquan He #include <asm-generic/iomap.h>
211*0b1f77e7SBaoquan He
2127b76ab83SJiaxun Yang #if defined(CONFIG_CPU_CAVIUM_OCTEON)
2131e820da3SHuacai Chen #define war_io_reorder_wmb() wmb()
2148faca49aSDavid Daney #else
215f6b7aeeeSSinan Kaya #define war_io_reorder_wmb() barrier()
2168faca49aSDavid Daney #endif
2178faca49aSDavid Daney
/*
 * __BUILD_MEMORY_SINGLE - generate a pair of single MMIO accessors.
 * @pfx:     name prefix of the generated pfx##write##bwlq() and
 *           pfx##read##bwlq() functions.
 * @bwlq:    access-size suffix (b/w/l/q), also selects the
 *           __swizzle_addr_* and *ioswab* helpers.
 * @type:    C type moved by a single access (u8/u16/u32/u64).
 * @barrier: non-zero - issue a full iobarrier_rw() before the access;
 *           zero - only the platform write-reorder workaround barrier
 *           on the write side.
 * @relax:   non-zero - omit the trailing rmb() in the read accessor
 *           (used for the _relaxed variants).
 * @irq:     non-zero - disable local interrupts around the split
 *           64-bit access path on 32-bit kernels.
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	/* apply any platform-specific address mangling */		\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	/* byte-swap the value as the bus mangling requires */		\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	/* native-width access, or 64-bit access on a 64-bit kernel */	\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		/* 64-bit store from a 32-bit kernel: merge the */	\
		/* %L0/%M0 register pair and "sd" the result.   */	\
		__asm__ __volatile__(					\
			".set push" "\t\t# __writeq""\n\t"		\
			".set arch=r4000" "\n\t"			\
			"dsll32 %L0, %L0, 0" "\n\t"			\
			"dsrl32 %L0, %L0, 0" "\n\t"			\
			"dsll32 %M0, %M0, 0" "\n\t"			\
			"or %L0, %L0, %M0" "\n\t"			\
			"sd %L0, %2" "\n\t"				\
			".set pop" "\n"					\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	/* native-width access, or 64-bit access on a 64-bit kernel */	\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		/* 64-bit load on a 32-bit kernel: "ld" then split */	\
		/* the value into the %L0/%M0 register pair.       */	\
		__asm__ __volatile__(					\
			".set push" "\t\t# __readq" "\n\t"		\
			".set arch=r4000" "\n\t"			\
			"ld %L0, %1" "\n\t"				\
			"dsra32 %M0, %L0, 0" "\n\t"			\
			"sll %L0, %L0, 0" "\n\t"			\
			".set pop" "\n"					\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}
298384740dcSRalf Baechle
/*
 * __BUILD_IOPORT_SINGLE - generate a pair of single I/O port accessors.
 * @pfx:     name prefix of the generated pfx##out##bwlq##p() and
 *           pfx##in##bwlq##p() functions.
 * @bwlq:    access-size suffix (b/w/l/q), also selects the
 *           __swizzle_addr_* and *ioswab* helpers.
 * @type:    C type moved by a single access; must fit in a register.
 * @barrier: non-zero - issue a full iobarrier_rw() before the access;
 *           zero - only the write-reorder workaround on the out side.
 * @relax:   non-zero - omit the trailing rmb() in the in accessor.
 * @p:       empty or _p, for the pausing out*_p()/in*_p() variants.
 *
 * Ports are memory mapped at mips_io_port_base + port.
 */
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	/* translate the port number into a mangled virtual address */	\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}
340384740dcSRalf Baechle
3418b656253SMaciej W. Rozycki #define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
342384740dcSRalf Baechle \
3438b656253SMaciej W. Rozycki __BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
344384740dcSRalf Baechle
345384740dcSRalf Baechle #define BUILDIO_MEM(bwlq, type) \
346384740dcSRalf Baechle \
3478b656253SMaciej W. Rozycki __BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \
3488b656253SMaciej W. Rozycki __BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \
3498b656253SMaciej W. Rozycki __BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \
3508b656253SMaciej W. Rozycki __BUILD_MEMORY_PFX(, bwlq, type, 0)
351384740dcSRalf Baechle
BUILDIO_MEM(b,u8)352384740dcSRalf Baechle BUILDIO_MEM(b, u8)
353384740dcSRalf Baechle BUILDIO_MEM(w, u16)
354384740dcSRalf Baechle BUILDIO_MEM(l, u32)
3551e279144SSerge Semin #ifdef CONFIG_64BIT
356384740dcSRalf Baechle BUILDIO_MEM(q, u64)
3571e279144SSerge Semin #else
3581e279144SSerge Semin __BUILD_MEMORY_PFX(__raw_, q, u64, 0)
3591e279144SSerge Semin __BUILD_MEMORY_PFX(__mem_, q, u64, 0)
3601e279144SSerge Semin #endif
361384740dcSRalf Baechle
362384740dcSRalf Baechle #define __BUILD_IOPORT_PFX(bus, bwlq, type) \
3638b656253SMaciej W. Rozycki __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \
3648b656253SMaciej W. Rozycki __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)
365384740dcSRalf Baechle
366384740dcSRalf Baechle #define BUILDIO_IOPORT(bwlq, type) \
367384740dcSRalf Baechle __BUILD_IOPORT_PFX(, bwlq, type) \
368384740dcSRalf Baechle __BUILD_IOPORT_PFX(__mem_, bwlq, type)
369384740dcSRalf Baechle
370384740dcSRalf Baechle BUILDIO_IOPORT(b, u8)
371384740dcSRalf Baechle BUILDIO_IOPORT(w, u16)
372384740dcSRalf Baechle BUILDIO_IOPORT(l, u32)
373384740dcSRalf Baechle #ifdef CONFIG_64BIT
374384740dcSRalf Baechle BUILDIO_IOPORT(q, u64)
375384740dcSRalf Baechle #endif
376384740dcSRalf Baechle
377384740dcSRalf Baechle #define __BUILDIO(bwlq, type) \
378384740dcSRalf Baechle \
3798b656253SMaciej W. Rozycki __BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
380384740dcSRalf Baechle
381384740dcSRalf Baechle __BUILDIO(q, u64)
382384740dcSRalf Baechle
3838b656253SMaciej W. Rozycki #define readb_relaxed __relaxed_readb
3848b656253SMaciej W. Rozycki #define readw_relaxed __relaxed_readw
3858b656253SMaciej W. Rozycki #define readl_relaxed __relaxed_readl
3861e279144SSerge Semin #ifdef CONFIG_64BIT
3878b656253SMaciej W. Rozycki #define readq_relaxed __relaxed_readq
3881e279144SSerge Semin #endif
389384740dcSRalf Baechle
3908b656253SMaciej W. Rozycki #define writeb_relaxed __relaxed_writeb
3918b656253SMaciej W. Rozycki #define writew_relaxed __relaxed_writew
3928b656253SMaciej W. Rozycki #define writel_relaxed __relaxed_writel
3931e279144SSerge Semin #ifdef CONFIG_64BIT
3948b656253SMaciej W. Rozycki #define writeq_relaxed __relaxed_writeq
3951e279144SSerge Semin #endif
396edd4201eSFlorian Fainelli
397f868ba29SFlorian Fainelli #define readb_be(addr) \
398f868ba29SFlorian Fainelli __raw_readb((__force unsigned *)(addr))
399f868ba29SFlorian Fainelli #define readw_be(addr) \
400f868ba29SFlorian Fainelli be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
401f868ba29SFlorian Fainelli #define readl_be(addr) \
402f868ba29SFlorian Fainelli be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
403f868ba29SFlorian Fainelli #define readq_be(addr) \
404f868ba29SFlorian Fainelli be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
405f868ba29SFlorian Fainelli
406f868ba29SFlorian Fainelli #define writeb_be(val, addr) \
407f868ba29SFlorian Fainelli __raw_writeb((val), (__force unsigned *)(addr))
408f868ba29SFlorian Fainelli #define writew_be(val, addr) \
409f868ba29SFlorian Fainelli __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
410f868ba29SFlorian Fainelli #define writel_be(val, addr) \
411f868ba29SFlorian Fainelli __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
412f868ba29SFlorian Fainelli #define writeq_be(val, addr) \
413f868ba29SFlorian Fainelli __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
414f868ba29SFlorian Fainelli
415384740dcSRalf Baechle /*
416384740dcSRalf Baechle * Some code tests for these symbols
417384740dcSRalf Baechle */
4181e279144SSerge Semin #ifdef CONFIG_64BIT
419384740dcSRalf Baechle #define readq readq
420384740dcSRalf Baechle #define writeq writeq
4211e279144SSerge Semin #endif
422384740dcSRalf Baechle
423384740dcSRalf Baechle #define __BUILD_MEMORY_STRING(bwlq, type) \
424384740dcSRalf Baechle \
425384740dcSRalf Baechle static inline void writes##bwlq(volatile void __iomem *mem, \
426384740dcSRalf Baechle const void *addr, unsigned int count) \
427384740dcSRalf Baechle { \
428384740dcSRalf Baechle const volatile type *__addr = addr; \
429384740dcSRalf Baechle \
430384740dcSRalf Baechle while (count--) { \
431384740dcSRalf Baechle __mem_write##bwlq(*__addr, mem); \
432384740dcSRalf Baechle __addr++; \
433384740dcSRalf Baechle } \
434384740dcSRalf Baechle } \
435384740dcSRalf Baechle \
436384740dcSRalf Baechle static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
437384740dcSRalf Baechle unsigned int count) \
438384740dcSRalf Baechle { \
439384740dcSRalf Baechle volatile type *__addr = addr; \
440384740dcSRalf Baechle \
441384740dcSRalf Baechle while (count--) { \
442384740dcSRalf Baechle *__addr = __mem_read##bwlq(mem); \
443384740dcSRalf Baechle __addr++; \
444384740dcSRalf Baechle } \
445384740dcSRalf Baechle }
446384740dcSRalf Baechle
447384740dcSRalf Baechle #define __BUILD_IOPORT_STRING(bwlq, type) \
448384740dcSRalf Baechle \
449384740dcSRalf Baechle static inline void outs##bwlq(unsigned long port, const void *addr, \
450384740dcSRalf Baechle unsigned int count) \
451384740dcSRalf Baechle { \
452384740dcSRalf Baechle const volatile type *__addr = addr; \
453384740dcSRalf Baechle \
454384740dcSRalf Baechle while (count--) { \
455384740dcSRalf Baechle __mem_out##bwlq(*__addr, port); \
456384740dcSRalf Baechle __addr++; \
457384740dcSRalf Baechle } \
458384740dcSRalf Baechle } \
459384740dcSRalf Baechle \
460384740dcSRalf Baechle static inline void ins##bwlq(unsigned long port, void *addr, \
461384740dcSRalf Baechle unsigned int count) \
462384740dcSRalf Baechle { \
463384740dcSRalf Baechle volatile type *__addr = addr; \
464384740dcSRalf Baechle \
465384740dcSRalf Baechle while (count--) { \
466384740dcSRalf Baechle *__addr = __mem_in##bwlq(port); \
467384740dcSRalf Baechle __addr++; \
468384740dcSRalf Baechle } \
469384740dcSRalf Baechle }
470384740dcSRalf Baechle
471384740dcSRalf Baechle #define BUILDSTRING(bwlq, type) \
472384740dcSRalf Baechle \
473384740dcSRalf Baechle __BUILD_MEMORY_STRING(bwlq, type) \
474384740dcSRalf Baechle __BUILD_IOPORT_STRING(bwlq, type)
475384740dcSRalf Baechle
476384740dcSRalf Baechle BUILDSTRING(b, u8)
477384740dcSRalf Baechle BUILDSTRING(w, u16)
478384740dcSRalf Baechle BUILDSTRING(l, u32)
479384740dcSRalf Baechle #ifdef CONFIG_64BIT
480384740dcSRalf Baechle BUILDSTRING(q, u64)
481384740dcSRalf Baechle #endif
482384740dcSRalf Baechle
483384740dcSRalf Baechle static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
484384740dcSRalf Baechle {
485384740dcSRalf Baechle memset((void __force *) addr, val, count);
486384740dcSRalf Baechle }
/* Copy @count bytes from MMIO space @src to normal memory @dst. */
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	const void __force *__src = (const void __force *) src;

	memcpy(dst, __src, count);
}
/* Copy @count bytes from normal memory @src to MMIO space @dst. */
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	void __force *__dst = (void __force *) dst;

	memcpy(__dst, src, count);
}
495384740dcSRalf Baechle
496384740dcSRalf Baechle /*
497384740dcSRalf Baechle * The caches on some architectures aren't dma-coherent and have need to
498384740dcSRalf Baechle * handle this in software. There are three types of operations that
499384740dcSRalf Baechle * can be applied to dma buffers.
500384740dcSRalf Baechle *
 * - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_wback(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
509384740dcSRalf Baechle * - dma_cache_inv(start, size) invalidates the affected parts of the
510384740dcSRalf Baechle * caches. Dirty lines of the caches may be written back or simply
511384740dcSRalf Baechle * be discarded. This operation is necessary before dma operations
512384740dcSRalf Baechle * to the memory.
513384740dcSRalf Baechle *
514384740dcSRalf Baechle * This API used to be exported; it now is for arch code internal use only.
515384740dcSRalf Baechle */
516972dc3b7SChristoph Hellwig #ifdef CONFIG_DMA_NONCOHERENT
517384740dcSRalf Baechle
518384740dcSRalf Baechle extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
519384740dcSRalf Baechle extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
520384740dcSRalf Baechle extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
521384740dcSRalf Baechle
522384740dcSRalf Baechle #define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
523384740dcSRalf Baechle #define dma_cache_wback(start, size) _dma_cache_wback(start, size)
524384740dcSRalf Baechle #define dma_cache_inv(start, size) _dma_cache_inv(start, size)
525384740dcSRalf Baechle
526384740dcSRalf Baechle #else /* Sane hardware */
527384740dcSRalf Baechle
528384740dcSRalf Baechle #define dma_cache_wback_inv(start,size) \
529384740dcSRalf Baechle do { (void) (start); (void) (size); } while (0)
530384740dcSRalf Baechle #define dma_cache_wback(start,size) \
531384740dcSRalf Baechle do { (void) (start); (void) (size); } while (0)
532384740dcSRalf Baechle #define dma_cache_inv(start,size) \
533384740dcSRalf Baechle do { (void) (start); (void) (size); } while (0)
534384740dcSRalf Baechle
535972dc3b7SChristoph Hellwig #endif /* CONFIG_DMA_NONCOHERENT */
536384740dcSRalf Baechle
537384740dcSRalf Baechle /*
538384740dcSRalf Baechle * Read a 32-bit register that requires a 64-bit read cycle on the bus.
539384740dcSRalf Baechle * Avoid interrupt mucking, just adjust the address for 4-byte access.
540384740dcSRalf Baechle * Assume the addresses are 8-byte aligned.
541384740dcSRalf Baechle */
542384740dcSRalf Baechle #ifdef __MIPSEB__
543384740dcSRalf Baechle #define __CSR_32_ADJUST 4
544384740dcSRalf Baechle #else
545384740dcSRalf Baechle #define __CSR_32_ADJUST 0
546384740dcSRalf Baechle #endif
547384740dcSRalf Baechle
548384740dcSRalf Baechle #define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
549384740dcSRalf Baechle #define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
550384740dcSRalf Baechle
551384740dcSRalf Baechle /*
552384740dcSRalf Baechle * Convert a physical pointer to a virtual kernel pointer for /dev/mem
553384740dcSRalf Baechle * access
554384740dcSRalf Baechle */
555384740dcSRalf Baechle #define xlate_dev_mem_ptr(p) __va(p)
55699b619b3SArnd Bergmann #define unxlate_dev_mem_ptr(p, v) do { } while (0)
557384740dcSRalf Baechle
558d8c825e2SPaul Burton void __ioread64_copy(void *to, const void __iomem *from, size_t count);
559d8c825e2SPaul Burton
560384740dcSRalf Baechle #endif /* _ASM_IO_H */
561