/*
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu-features.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
#include <spaces.h>

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true
 * on any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
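/*
 * Usage sketch (illustrative; the physical base address, port numbers and
 * the board_io_init() helper are hypothetical): a board would typically
 * establish the port base once during early init and then use the
 * outb()/inb() accessors generated further down in this header.
 *
 *	static void board_io_init(void)
 *	{
 *		set_io_port_base(CKSEG1ADDR(0x18000000));
 *		outb(0x03, 0x3f8 + 3);		// write a hypothetical UART register
 *		(void)inb(0x3f8 + 5);		// read a hypothetical UART register
 *	}
 */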
/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 *
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	unsigned long addr = (unsigned long)address;

	/* this corresponds to kernel implementation of __pa() */
#ifdef CONFIG_64BIT
	if (addr < CKSEG0)
		return XPHYSADDR(addr);

	return CPHYSADDR(addr);
#else
	return addr - PAGE_OFFSET + PHYS_OFFSET;
#endif
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
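/*
 * Usage sketch (illustrative; struct dma_desc and its fields are
 * hypothetical): translating between a directly mapped buffer and the
 * physical addresses a DMA engine works with.
 *
 *	void setup_desc(struct dma_desc *desc, void *buf, unsigned long next_phys)
 *	{
 *		desc->buf_addr = virt_to_phys(buf);
 *		desc->next = phys_to_virt(next_phys);
 *	}
 */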
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
					   unsigned long flags)
{
	void __iomem *addr;
	phys_addr_t phys_addr;

	addr = plat_ioremap(offset, size, flags);
	if (addr)
		return addr;

	phys_addr = fixup_bigphys_addr(offset, size);
	return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr);
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)	\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
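/*
 * Usage sketch (illustrative; the base address and register offsets are
 * hypothetical): map a device's register window uncached and access it
 * with the mmio accessors defined below.
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x100);
 *
 *	writel(0x1, regs + 0x08);		// start the device
 *	while (!(readl(regs + 0x0c) & 0x1))	// poll a status bit
 *		;
 *	iounmap(regs);
 */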
/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)	\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)	\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)	\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)	\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
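/*
 * Usage sketch (illustrative; address and size are hypothetical): a
 * memory-like region such as a frame buffer may be mapped cached, while
 * control registers should stay with plain ioremap()/ioremap_nocache().
 *
 *	void __iomem *fb = ioremap_cachable(0x10000000, 0x00200000);
 *
 *	memset_io(fb, 0, 0x00200000);
 */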
static inline void iounmap(const volatile void __iomem *addr)
{
	plat_iounmap(addr);
}

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb()	wmb()
#else
#define war_octeon_io_reorder_wmb()	do { } while (0)
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)	\
\
static inline void pfx##write##bwlq(type val,	\
				    volatile void __iomem *mem)	\
{	\
	volatile type *__mem;	\
	type __val;	\
\
	war_octeon_io_reorder_wmb();	\
\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
\
	__val = pfx##ioswab##bwlq(__mem, val);	\
\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long))	\
		*__mem = __val;	\
	else if (cpu_has_64bits) {	\
		type __tmp;	\
\
		__asm__ __volatile__(	\
			".set arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32 %L0, %L0, 0"	"\n\t"	\
			"dsrl32 %L0, %L0, 0"	"\n\t"	\
			"dsll32 %M0, %M0, 0"	"\n\t"	\
			"or %L0, %L0, %M0"	"\n\t"	\
			"sd %L0, %2"	"\n\t"	\
			".set mips0"	"\n"	\
			: "=r" (__tmp)	\
			: "0" (__val), "m" (*__mem));	\
	} else	\
		BUG();	\
}	\
\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{	\
	volatile type *__mem;	\
	type __val;	\
\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long))	\
		__val = *__mem;	\
	else if (cpu_has_64bits) {	\
		__asm__ __volatile__(	\
			".set arch=r4000"	"\t\t# __readq" "\n\t"	\
			"ld %L0, %1"	"\n\t"	\
Schwierzeck "dsra32 %M0, %L0, 0" "\n\t" \ 30723ff8633SDaniel Schwierzeck "sll %L0, %L0, 0" "\n\t" \ 30823ff8633SDaniel Schwierzeck ".set mips0" "\n" \ 30923ff8633SDaniel Schwierzeck : "=r" (__val) \ 31023ff8633SDaniel Schwierzeck : "m" (*__mem)); \ 31123ff8633SDaniel Schwierzeck } else { \ 31223ff8633SDaniel Schwierzeck __val = 0; \ 31323ff8633SDaniel Schwierzeck BUG(); \ 31423ff8633SDaniel Schwierzeck } \ 31523ff8633SDaniel Schwierzeck \ 31623ff8633SDaniel Schwierzeck return pfx##ioswab##bwlq(__mem, __val); \ 31723ff8633SDaniel Schwierzeck } 31823ff8633SDaniel Schwierzeck 31923ff8633SDaniel Schwierzeck #define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ 32023ff8633SDaniel Schwierzeck \ 32123ff8633SDaniel Schwierzeck static inline void pfx##out##bwlq##p(type val, unsigned long port) \ 32223ff8633SDaniel Schwierzeck { \ 32323ff8633SDaniel Schwierzeck volatile type *__addr; \ 32423ff8633SDaniel Schwierzeck type __val; \ 32523ff8633SDaniel Schwierzeck \ 32623ff8633SDaniel Schwierzeck war_octeon_io_reorder_wmb(); \ 32723ff8633SDaniel Schwierzeck \ 32823ff8633SDaniel Schwierzeck __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ 32923ff8633SDaniel Schwierzeck \ 33023ff8633SDaniel Schwierzeck __val = pfx##ioswab##bwlq(__addr, val); \ 33123ff8633SDaniel Schwierzeck \ 33223ff8633SDaniel Schwierzeck /* Really, we want this to be atomic */ \ 33323ff8633SDaniel Schwierzeck BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 33423ff8633SDaniel Schwierzeck \ 33523ff8633SDaniel Schwierzeck *__addr = __val; \ 33623ff8633SDaniel Schwierzeck slow; \ 33723ff8633SDaniel Schwierzeck } \ 33823ff8633SDaniel Schwierzeck \ 33923ff8633SDaniel Schwierzeck static inline type pfx##in##bwlq##p(unsigned long port) \ 34023ff8633SDaniel Schwierzeck { \ 34123ff8633SDaniel Schwierzeck volatile type *__addr; \ 34223ff8633SDaniel Schwierzeck type __val; \ 34323ff8633SDaniel Schwierzeck \ 34423ff8633SDaniel Schwierzeck __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ 34523ff8633SDaniel Schwierzeck \ 34623ff8633SDaniel Schwierzeck BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 34723ff8633SDaniel Schwierzeck \ 34823ff8633SDaniel Schwierzeck __val = *__addr; \ 34923ff8633SDaniel Schwierzeck slow; \ 35023ff8633SDaniel Schwierzeck \ 35123ff8633SDaniel Schwierzeck return pfx##ioswab##bwlq(__addr, __val); \ 35223ff8633SDaniel Schwierzeck } 35323ff8633SDaniel Schwierzeck 35423ff8633SDaniel Schwierzeck #define __BUILD_MEMORY_PFX(bus, bwlq, type) \ 35523ff8633SDaniel Schwierzeck \ 35623ff8633SDaniel Schwierzeck __BUILD_MEMORY_SINGLE(bus, bwlq, type, 1) 35723ff8633SDaniel Schwierzeck 35823ff8633SDaniel Schwierzeck #define BUILDIO_MEM(bwlq, type) \ 35923ff8633SDaniel Schwierzeck \ 36023ff8633SDaniel Schwierzeck __BUILD_MEMORY_PFX(__raw_, bwlq, type) \ 36123ff8633SDaniel Schwierzeck __BUILD_MEMORY_PFX(, bwlq, type) \ 36223ff8633SDaniel Schwierzeck __BUILD_MEMORY_PFX(__mem_, bwlq, type) \ 36323ff8633SDaniel Schwierzeck 36423ff8633SDaniel Schwierzeck BUILDIO_MEM(b, u8) 36523ff8633SDaniel Schwierzeck BUILDIO_MEM(w, u16) 36623ff8633SDaniel Schwierzeck BUILDIO_MEM(l, u32) 36723ff8633SDaniel Schwierzeck BUILDIO_MEM(q, u64) 36823ff8633SDaniel Schwierzeck 36923ff8633SDaniel Schwierzeck #define __BUILD_IOPORT_PFX(bus, bwlq, type) \ 37023ff8633SDaniel Schwierzeck __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \ 37123ff8633SDaniel Schwierzeck __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO) 37223ff8633SDaniel Schwierzeck 37323ff8633SDaniel Schwierzeck #define BUILDIO_IOPORT(bwlq, type) \ 
#define __BUILD_IOPORT_PFX(bus, bwlq, type)	\
__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)	\
__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)	\
__BUILD_IOPORT_PFX(, bwlq, type)	\
__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)	\
\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed	readb
#define readw_relaxed	readw
#define readl_relaxed	readl
#define readq_relaxed	readq

#define writeb_relaxed	writeb
#define writew_relaxed	writew
#define writel_relaxed	writel
#define writeq_relaxed	writeq

#define readb_be(addr)	\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)	\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)	\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)	\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)	\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)	\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)	\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)	\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
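/*
 * Usage sketch (illustrative; the register offset is hypothetical):
 * accessing a register block that is wired big-endian regardless of the
 * CPU's endianness.
 *
 *	u32 id = readl_be(regs + 0x00);
 *	writel_be(id | 0x1, regs + 0x00);
 */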
/*
 * Some code tests for these symbols
 */
#define readq readq
#define writeq writeq

#define __BUILD_MEMORY_STRING(bwlq, type)	\
\
static inline void writes##bwlq(volatile void __iomem *mem,	\
				const void *addr, unsigned int count)	\
{	\
	const volatile type *__addr = addr;	\
\
	while (count--) {	\
		__mem_write##bwlq(*__addr, mem);	\
		__addr++;	\
	}	\
}	\
\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)	\
{	\
	volatile type *__addr = addr;	\
\
	while (count--) {	\
		*__addr = __mem_read##bwlq(mem);	\
		__addr++;	\
	}	\
}

#define __BUILD_IOPORT_STRING(bwlq, type)	\
\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)	\
{	\
	const volatile type *__addr = addr;	\
\
	while (count--) {	\
		__mem_out##bwlq(*__addr, port);	\
		__addr++;	\
	}	\
}	\
\
static inline void ins##bwlq(unsigned long port, void *addr,	\
			     unsigned int count)	\
{	\
	volatile type *__addr = addr;	\
\
	while (count--) {	\
		*__addr = __mem_in##bwlq(port);	\
		__addr++;	\
	}	\
}

#define BUILDSTRING(bwlq, type)	\
\
__BUILD_MEMORY_STRING(bwlq, type)	\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
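/*
 * Usage sketch (illustrative; the FIFO offset is hypothetical): moving a
 * buffer through a 16-bit data FIFO with the string accessors generated
 * above.
 *
 *	u16 buf[64];
 *
 *	readsw(regs + 0x40, buf, 64);
 *	writesw(regs + 0x40, buf, 64);
 */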
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *)src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *)dst, src, count);
}

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

/*
 * U-Boot specific
 */
#define sync()		mmiowb()

#define MAP_NOCACHE	(1)
#define MAP_WRCOMBINE	(0)
#define MAP_WRBACK	(0)
#define MAP_WRTHROUGH	(0)

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	if (flags == MAP_NOCACHE)
		return ioremap(paddr, len);

	return (void *)paddr;
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}

#define __BUILD_CLRBITS(bwlq, sfx, end, type)	\
\
static inline void clrbits_##sfx(volatile void __iomem *mem, type clr)	\
{	\
	type __val = __raw_read##bwlq(mem);	\
	__val = end##_to_cpu(__val);	\
	__val &= ~clr;	\
	__val = cpu_to_##end(__val);	\
	__raw_write##bwlq(__val, mem);	\
}

#define __BUILD_SETBITS(bwlq, sfx, end, type)	\
\
static inline void setbits_##sfx(volatile void __iomem *mem, type set)	\
{	\
	type __val = __raw_read##bwlq(mem);	\
	__val = end##_to_cpu(__val);	\
	__val |= set;	\
	__val = cpu_to_##end(__val);	\
	__raw_write##bwlq(__val, mem);	\
}

#define __BUILD_CLRSETBITS(bwlq, sfx, end, type)	\
\
static inline void clrsetbits_##sfx(volatile void __iomem *mem,	\
				    type clr, type set)	\
{	\
	type __val = __raw_read##bwlq(mem);	\
	__val = end##_to_cpu(__val);	\
	__val &= ~clr;	\
	__val |= set;	\
	__val = cpu_to_##end(__val);	\
	__raw_write##bwlq(__val, mem);	\
}

#define BUILD_CLRSETBITS(bwlq, sfx, end, type)	\
\
__BUILD_CLRBITS(bwlq, sfx, end, type)	\
__BUILD_SETBITS(bwlq, sfx, end, type)	\
__BUILD_CLRSETBITS(bwlq, sfx, end, type)

#define __to_cpu(v)	(v)
#define cpu_to__(v)	(v)

BUILD_CLRSETBITS(b, 8, _, u8)
BUILD_CLRSETBITS(w, le16, le16, u16)
BUILD_CLRSETBITS(w, be16, be16, u16)
BUILD_CLRSETBITS(w, 16, _, u16)
BUILD_CLRSETBITS(l, le32, le32, u32)
BUILD_CLRSETBITS(l, be32, be32, u32)
BUILD_CLRSETBITS(l, 32, _, u32)
BUILD_CLRSETBITS(q, le64, le64, u64)
BUILD_CLRSETBITS(q, be64, be64, u64)
BUILD_CLRSETBITS(q, 64, _, u64)
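/*
 * Usage sketch (illustrative; offsets, masks and values are hypothetical):
 * the generated helpers perform a read-modify-write of a register without
 * open-coding the byte swapping.
 *
 *	setbits_le32(regs + 0x04, 0x1);			// set an enable bit
 *	clrbits_le32(regs + 0x04, 0x80);		// clear a reset bit
 *	clrsetbits_le32(regs + 0x08, 0xff, 0x2a);	// replace a bit field
 */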
#endif /* _ASM_IO_H */