/*
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu-features.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
#include <spaces.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

#ifdef CONFIG_DYNAMIC_IO_PORT_BASE

static inline ulong mips_io_port_base(void)
{
	DECLARE_GLOBAL_DATA_PTR;

	return gd->arch.io_port_base;
}

static inline void set_io_port_base(unsigned long base)
{
	DECLARE_GLOBAL_DATA_PTR;

	gd->arch.io_port_base = base;
	barrier();
}

#else /* !CONFIG_DYNAMIC_IO_PORT_BASE */

static inline ulong mips_io_port_base(void)
{
	return 0;
}

static inline void set_io_port_base(unsigned long base)
{
	BUG_ON(base);
}

#endif /* !CONFIG_DYNAMIC_IO_PORT_BASE */

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	unsigned long addr = (unsigned long)address;

	/* this corresponds to kernel implementation of __pa() */
#ifdef CONFIG_64BIT
	if (addr < CKSEG0)
		return XPHYSADDR(addr);

	return CPHYSADDR(addr);
#else
	return addr - PAGE_OFFSET + PHYS_OFFSET;
#endif
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
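
/*
 * Usage sketch (illustrative only, not part of this API): handing a
 * directly-mapped buffer's address to a DMA engine and mapping it back.
 * The descriptor layout below is hypothetical.
 *
 *	void *buf = memalign(ARCH_DMA_MINALIGN, 4096);
 *
 *	desc->buf_addr = virt_to_phys(buf);		physical for the HW
 *	...
 *	void *same = phys_to_virt(desc->buf_addr);	back to a CPU pointer
 *
 * On 32-bit configurations this is a simple offset by
 * PAGE_OFFSET/PHYS_OFFSET; on 64-bit it uses the CKSEG0/XKPHYS rules
 * shown in virt_to_phys() above.
 */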

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
					   unsigned long flags)
{
	void __iomem *addr;
	phys_addr_t phys_addr;

	addr = plat_ioremap(offset, size, flags);
	if (addr)
		return addr;

	phys_addr = fixup_bigphys_addr(offset, size);
	return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr);
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size:   size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size:   size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size:   size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
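
/*
 * Usage sketch (illustrative only, hypothetical device): map a register
 * window uncached and access it with the mmio accessors generated further
 * below.  The base address and register offset are made up.
 *
 *	void __iomem *regs = ioremap(0x1e000000, 0x1000);
 *
 *	u32 status = readl(regs + 0x08);
 *	writel(status | 0x1, regs + 0x08);
 *	iounmap(regs);
 *
 * On MIPS the returned address is typically a CKSEG1 (uncached) address,
 * unless plat_ioremap() supplies a platform-specific mapping first.
 */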

/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(const volatile void __iomem *addr)
{
	plat_iounmap(addr);
}

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define war_octeon_io_reorder_wmb()		wmb()
#else
#define war_octeon_io_reorder_wmb()		do { } while (0)
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		type __tmp;						\
									\
		__asm__ __volatile__(					\
			".set arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or %L0, %L0, %M0"			"\n\t"	\
			"sd %L0, %2"				"\n\t"	\
			".set mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		__asm__ __volatile__(					\
			".set arch=r4000"	"\t\t# __readq" "\n\t"	\
			"ld %L0, %1"				"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll %L0, %L0, 0"			"\n\t"	\
			".set mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_octeon_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
									\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
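
/*
 * The instantiations above expand to the usual mmio accessor families:
 *
 *	u8  readb(const volatile void __iomem *mem) / writeb(u8 val, mem)
 *	u16 readw(...) / writew(...)
 *	u32 readl(...) / writel(...)
 *	u64 readq(...) / writeq(...)
 *
 * plus the __raw_ variants (no byte swapping) and the __mem_ variants
 * (bus endianness, see mangle-port.h).  The 64-bit accessors use the
 * r4000 assembly paths on 32-bit builds of 64-bit CPUs and BUG() on
 * CPUs without 64-bit support.
 */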

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, )			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed		readb
#define readw_relaxed		readw
#define readl_relaxed		readl
#define readq_relaxed		readq

#define writeb_relaxed		writeb
#define writew_relaxed		writew
#define writel_relaxed		writel
#define writeq_relaxed		writeq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols
 */
#define readq readq
#define writeq writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *)src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *)dst, src, count);
}
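
/*
 * Usage sketch (illustrative only, hypothetical addresses and struct):
 * draining a device FIFO with the string accessors, and copying a
 * memory-mapped buffer with memcpy_fromio().
 *
 *	u16 rxbuf[64];
 *	readsw(fifo_reg, rxbuf, 64);		64 reads of one address
 *
 *	struct fw_header hdr;
 *	memcpy_fromio(&hdr, sram_base, sizeof(hdr));
 *
 * Note the difference: readsw() repeatedly reads the same register,
 * while memcpy_fromio() walks an address range.
 */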

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

/*
 * U-Boot specific
 */
#define sync()		mmiowb()

#define MAP_NOCACHE	(1)
#define MAP_WRCOMBINE	(0)
#define MAP_WRBACK	(0)
#define MAP_WRTHROUGH	(0)

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	if (flags == MAP_NOCACHE)
		return ioremap(paddr, len);

	return (void *)paddr;
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}

#define __BUILD_CLRBITS(bwlq, sfx, end, type)				\
									\
static inline void clrbits_##sfx(volatile void __iomem *mem, type clr)	\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val &= ~clr;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define __BUILD_SETBITS(bwlq, sfx, end, type)				\
									\
static inline void setbits_##sfx(volatile void __iomem *mem, type set)	\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val |= set;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define __BUILD_CLRSETBITS(bwlq, sfx, end, type)			\
									\
static inline void clrsetbits_##sfx(volatile void __iomem *mem,		\
				    type clr, type set)			\
{									\
	type __val = __raw_read##bwlq(mem);				\
	__val = end##_to_cpu(__val);					\
	__val &= ~clr;							\
	__val |= set;							\
	__val = cpu_to_##end(__val);					\
	__raw_write##bwlq(__val, mem);					\
}

#define BUILD_CLRSETBITS(bwlq, sfx, end, type)				\
									\
__BUILD_CLRBITS(bwlq, sfx, end, type)					\
__BUILD_SETBITS(bwlq, sfx, end, type)					\
__BUILD_CLRSETBITS(bwlq, sfx, end, type)

#define __to_cpu(v)		(v)
#define cpu_to__(v)		(v)

BUILD_CLRSETBITS(b, 8, _, u8)
BUILD_CLRSETBITS(w, le16, le16, u16)
BUILD_CLRSETBITS(w, be16, be16, u16)
BUILD_CLRSETBITS(w, 16, _, u16)
BUILD_CLRSETBITS(l, le32, le32, u32)
BUILD_CLRSETBITS(l, be32, be32, u32)
BUILD_CLRSETBITS(l, 32, _, u32)
BUILD_CLRSETBITS(q, le64, le64, u64)
BUILD_CLRSETBITS(q, be64, be64, u64)
BUILD_CLRSETBITS(q, 64, _, u64)
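
/*
 * Usage sketch (illustrative only, hypothetical register and field):
 * update a bitfield in a little-endian 32-bit register with a single
 * read-modify-write, e.g. clearing a 4-bit divider field at bit 8 and
 * setting it to 5:
 *
 *	clrsetbits_le32(base + CLK_CTRL, 0xf << 8, 5 << 8);
 *
 * The _8/_16/_32/_64 variants use native CPU endianness (no swapping);
 * the _le and _be variants convert to and from bus endianness around
 * the raw access.
 */

#endif /* _ASM_IO_H */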