// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * These functions should only be used when it is necessary to map a
 * physical address space into the kernel address space before ioremap()
 * can be used, e.g. early in boot before paging_init().
 *
 * Copyright (C) 2009 Matt Fleming
 */

#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include "ioremap.h"

struct ioremap_map {
	void __iomem *addr;
	unsigned long size;
	unsigned long fixmap_addr;
};

static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];

void __init ioremap_fixed_init(void)
{
	struct ioremap_map *map;
	int i;

	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
	}
}

void __init __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	enum fixed_addresses idx0, idx;
	struct ioremap_map *map;
	unsigned int nrpages;
	unsigned long offset;
	int i, slot;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - phys_addr;

	slot = -1;
	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		if (!map->addr) {
			map->size = size;
			slot = i;
			break;
		}
	}

	if (slot < 0)
		return NULL;

	/*
	 * Mappings have to fit in the FIX_IOREMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > FIX_N_IOREMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_IOREMAP_BEGIN + slot;
	idx = idx0;
	while (nrpages > 0) {
		pgprot_val(prot) |= _PAGE_WIRED;
		__set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		idx++;
		--nrpages;
	}

	map->addr = (void __iomem *)(offset + map->fixmap_addr);
	return map->addr;
}

int iounmap_fixed(void __iomem *addr)
{
	enum fixed_addresses idx;
	struct ioremap_map *map;
	unsigned int nrpages;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		if (map->addr == addr) {
			slot = i;
			break;
		}
	}

	/*
	 * If we don't match, it's not for us.
	 */
	if (slot < 0)
		return -EINVAL;

	nrpages = map->size >> PAGE_SHIFT;

	idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1;
	while (nrpages > 0) {
		__clear_fixmap(idx, __pgprot(_PAGE_WIRED));
		--idx;
		--nrpages;
	}

	map->size = 0;
	map->addr = NULL;

	return 0;
}
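
/*
 * Illustrative usage sketch (not part of the original code): how early
 * boot code might use the interfaces above before paging_init() has run
 * and the regular ioremap() path is available.  The names EARLY_UART_PHYS,
 * early_uart_base and early_uart_map() are hypothetical, invented for this
 * example, and PAGE_KERNEL_NOCACHE is assumed to be an appropriate uncached
 * protection for device registers.
 *
 *	#define EARLY_UART_PHYS		0xffe00000UL
 *
 *	static void __iomem *early_uart_base;
 *
 *	void __init early_uart_map(void)
 *	{
 *		// ioremap_fixed_init() must already have initialised the
 *		// fixmap slots before any mapping is requested.
 *		early_uart_base = ioremap_fixed(EARLY_UART_PHYS, PAGE_SIZE,
 *						PAGE_KERNEL_NOCACHE);
 *		if (!early_uart_base)
 *			return;
 *
 *		// Registers can now be accessed through early_uart_base.
 *		// Once ioremap() proper is usable, the wired TLB entry can
 *		// be released again with:
 *		// iounmap_fixed(early_uart_base);
 *	}
 */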