// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/kmap.c
 *
 * Copyright (C) 1997 Roman Hodek
 *
 * 10/01/99 cleaned up the code and changed to the same interface
 *	    used by other architectures /Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but that
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud (%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}
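
/*
 * get_io_area() keeps iolist sorted by address and does a first-fit
 * scan between KMAP_START and KMAP_END.  area->size is padded by one
 * extra IO_SIZE chunk, leaving an unmapped guard gap between
 * consecutive mappings; free_io_area() below strips that gap off again
 * before passing the real size to __free_io_area().
 */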

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
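
/*
 * Usage sketch (illustrative only; the device address, size and cache
 * flag below are made-up values, not taken from this file):
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x02000000, 0x4000, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 *
 * __ioremap() rounds both the physical base and the size to IO_SIZE
 * chunks and returns the virtual address corresponding to the original
 * physaddr (retaddr = virtaddr + offset), so the caller need not care
 * about the internal alignment.
 */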

/*
 * Unmap an ioremap()ed region again.
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud (%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
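
/*
 * Usage sketch for kernel_set_cachemode() (illustrative only; "buf" is
 * a hypothetical page-aligned kernel buffer):
 *
 *	kernel_set_cachemode(buf, PAGE_SIZE, IOMAP_WRITETHROUGH);
 *
 * As the comment above kernel_set_cachemode() notes, the caller must
 * push any data for the range out of the cache first; switching the
 * mode does not flush it.
 */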