/* arch/arm64/mm/ioremap.c */
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
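	 *
	 * For example, with 4KB pages, phys_addr = 0x10001234 and
	 * size = 0x100 give offset = 0x234; the single page at
	 * 0x10001000 is mapped and the caller gets back the new virtual
	 * address plus that 0x234 offset.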
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + addr);
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (VMALLOC_START <= addr && addr < VMALLOC_END)
		vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);
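
/*
 * Usage sketch (not part of this file): a driver would normally go
 * through the ioremap()/iounmap() wrappers from <asm/io.h>, which
 * resolve to __ioremap() with Device-nGnRE attributes and to
 * __iounmap() above.  The device base address and register layout
 * below are hypothetical.
 */
#if 0	/* illustration only */
static int example_probe(void)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap(0x3f200000, 0x1000);	/* hypothetical device base */
	if (!regs)
		return -ENOMEM;

	id = readl(regs);			/* hypothetical ID register at offset 0 */
	pr_info("device id: %#x\n", id);

	iounmap(regs);
	return 0;
}
#endif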

void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
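
/*
 * Usage sketch (illustration only; the address is hypothetical): for a
 * memory-like region that is not kernel RAM, ioremap_cache() creates a
 * fresh Normal cacheable mapping; for anything already covered by the
 * linear map it simply returns the existing cacheable alias, which
 * __iounmap() above deliberately leaves in place.
 */
#if 0	/* illustration only */
static void example_map_table(void)
{
	void __iomem *tbl;

	tbl = ioremap_cache(0x80000000, 0x1000);	/* hypothetical table */
	if (!tbl)
		return;

	/* ... read the table through the cacheable mapping ... */

	iounmap(tbl);
}
#endif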
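/*
 * Statically allocated page tables backing the fixmap region used for
 * early (boot-time) ioremap: one page-aligned table per level that
 * exists for the configured number of page-table levels; folded levels
 * need no table of their own.
 */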
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * __init early_ioremap_pud(unsigned long addr)
{
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	pud_t *pud = early_ioremap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	pmd_t *pmd = early_ioremap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

void __init early_ioremap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p\n",
			pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}

	early_ioremap_setup();
}
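
/*
 * Usage sketch (not part of this file): once early_ioremap_init() has
 * run, boot-time code can create short-lived mappings through the
 * generic early_ioremap()/early_iounmap() helpers (mm/early_ioremap.c),
 * which update fixmap slots via __early_set_fixmap() below.  The
 * address is hypothetical.
 */
#if 0	/* illustration only */
static void __init example_early_probe(void)
{
	void __iomem *base;

	base = early_ioremap(0x09000000, 0x1000);	/* hypothetical */
	if (!base)
		return;

	/* ... inspect the region during early boot ... */

	early_iounmap(base, 0x1000);
}
#endif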

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}