// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/kmap.c
 *
 * Copyright (C) 1997 Roman Hodek
 *
 * 10/01/99 cleaned up the code and changed to the same interface
 *	    used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
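/*
 * Example (illustrative values only): on 020/030, IO_SIZE below is
 * PMD_SIZE, so a hypothetical __ioremap(0x40001000, 0x1000, ...) claims a
 * full PMD_SIZE-aligned slot and returns the slot's virtual address plus
 * the original 0x1000 offset.
 */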

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		PMD_SIZE

static struct vm_struct *iolist;

/*
 * __free_io_area() unmaps nearly everything, so be careful.
 * It currently does not free the pointer/page tables themselves;
 * that was never used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
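		/*
		 * On 020/030 a single early termination descriptor can map a
		 * whole PMD_SIZE chunk; such an entry is cleared in one step
		 * rather than by walking individual page table entries.
		 */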
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				/* skip the pte walk for this chunk */
				continue;
			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
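	/*
	 * First-fit scan of the address-sorted iolist: advance past each
	 * existing area until a gap large enough for this request opens up
	 * between KMAP_START and KMAP_END.
	 */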
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END - size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
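	/* Pad with an IO_SIZE guard gap; free_io_area() strips it again. */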
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 * Rewritten by Andreas Schwab to remove all races.
 */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT) {
		if (physaddr >= 0xff000000 && cacheflag == IOMAP_NOCACHE_SER)
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *)physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be IO_SIZE-aligned; keep the offset within the
	 * first chunk so it can be added back to the returned address.
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE - 1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
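		/*
		 * On 020/030, point an early termination descriptor straight
		 * at the physical address, mapping a whole PMD_SIZE chunk
		 * with a single pmd entry.
		 */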
		if (CPU_IS_020_OR_030) {
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
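
/*
 * Usage sketch (hypothetical addresses, for illustration only):
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x02000000, 0x4000, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	writeb(1, regs);
 *	iounmap(regs);
 *
 * The returned cookie keeps physaddr's offset within its IO_SIZE chunk,
 * so an unaligned physical address yields a correspondingly offset
 * virtual address.
 */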

/*
 * Unmap an ioremap()ed region again.
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA &&
	    ((unsigned long)addr >= 0x40000000) &&
	    ((unsigned long)addr < 0x60000000))
		return;
#endif
#ifdef CONFIG_VIRT
	if (MACH_IS_VIRT && (unsigned long)addr >= 0xff000000)
		return;
#endif
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
}
EXPORT_SYMBOL(iounmap);

/*
 * Set a new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
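		/*
		 * On 020/030 an early termination descriptor covers a whole
		 * PMD_SIZE chunk, so update its cache bits in place instead
		 * of touching individual ptes.
		 */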
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
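
/*
 * Example (illustrative only): a driver that has ioremap()ed a hypothetical
 * frame buffer could switch it to writethrough caching with:
 *
 *	kernel_set_cachemode(fb_base, fb_size, IOMAP_WRITETHROUGH);
 *
 * after pushing any cached data for the range, as noted above.
 */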