xref: /openbmc/linux/arch/sh/mm/ioremap_fixed.c (revision 4b4193256c8d3bc3a5397b5cd9494c2ad386317d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * These functions should only be used when it is necessary to map a
 * physical address space into the kernel address space before ioremap()
 * can be used, e.g. early in boot before paging_init().
 *
 * Copyright (C) 2009  Matt Fleming
 */

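/*
 * A minimal usage sketch, assuming a hypothetical device at physical
 * address 0xfe000000 and that PAGE_KERNEL_NOCACHE is a suitable pgprot
 * for an uncached register mapping; guarded out so it is never built.
 */
#if 0
static void __init example_early_probe(void)
{
	void __iomem *regs;

	/* Map one page of the (made-up) device's registers, uncached. */
	regs = ioremap_fixed(0xfe000000, PAGE_SIZE, PAGE_KERNEL_NOCACHE);
	if (!regs)
		return;

	/* ... access the hardware through 'regs' ... */

	iounmap_fixed(regs);
}
#endif
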
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include "ioremap.h"

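/*
 * Per-slot bookkeeping: the virtual address handed back to the caller,
 * the (page-aligned) size of the mapping, and the fixed virtual address
 * backing the slot.
 */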
struct ioremap_map {
	void __iomem *addr;
	unsigned long size;
	unsigned long fixmap_addr;
};

static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];

void __init ioremap_fixed_init(void)
{
	struct ioremap_map *map;
	int i;

	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
	}
}

void __init __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	enum fixed_addresses idx0, idx;
	struct ioremap_map *map;
	unsigned int nrpages;
	unsigned long offset;
	int i, slot;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - phys_addr;
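	/*
	 * For example, with 4 KiB pages a request for phys_addr 0x10000123
	 * and size 0x10 becomes offset 0x123, phys_addr 0x10000000 and a
	 * rounded size of 0x1000 (one page).
	 */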

	slot = -1;
	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		if (!map->addr) {
			map->size = size;
			slot = i;
			break;
		}
	}

	if (slot < 0)
		return NULL;

	/*
	 * Mappings have to fit in the FIX_IOREMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > FIX_N_IOREMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_IOREMAP_BEGIN + slot;
	idx = idx0;
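	/*
	 * Wire each page into its fixmap slot; _PAGE_WIRED is expected to
	 * pin the translation in the TLB so the mapping works this early,
	 * before normal fault handling is available.
	 */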
	while (nrpages > 0) {
		pgprot_val(prot) |= _PAGE_WIRED;
		__set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		idx++;
		--nrpages;
	}

	map->addr = (void __iomem *)(offset + map->fixmap_addr);
	return map->addr;
}

int iounmap_fixed(void __iomem *addr)
{
	enum fixed_addresses idx;
	struct ioremap_map *map;
	unsigned int nrpages;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_N_IOREMAPS; i++) {
		map = &ioremap_maps[i];
		if (map->addr == addr) {
			slot = i;
			break;
		}
	}

	/*
	 * If we don't match, it's not for us.
	 */
	if (slot < 0)
		return -EINVAL;

	nrpages = map->size >> PAGE_SHIFT;

	idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1;
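	/*
	 * Tear down the slot's fixmap entries in reverse order, one page
	 * at a time.
	 */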
	while (nrpages > 0) {
		__clear_fixmap(idx, __pgprot(_PAGE_WIRED));
		--idx;
		--nrpages;
	}

	map->size = 0;
	map->addr = NULL;

	return 0;
}