/*
 *  linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the ARM1022E.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen so that we always use the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1022_proc_init()
 */
ENTRY(cpu_arm1022_proc_init)
	mov	pc, lr

/*
 * cpu_arm1022_proc_fin()
 */
ENTRY(cpu_arm1022_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm1022_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1022_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm1022_do_idle()
 */
	.align	5
ENTRY(cpu_arm1022_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm1022_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
#endif
	mov	pc, lr
ENDPROC(arm1022_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(arm1022_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
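 *
 *	The D cache is walked by segment/index (as encoded by the shifts
 *	in the loop below): bits 8:5 of the MCR operand select one of the
 *	CACHE_DSEGMENTS (16) segments and bits 31:26 one of the
 *	CACHE_DENTRIES (64) lines within it, so one clean+invalidate is
 *	issued for each of 16 * 64 = 1024 lines of CACHE_DLINESIZE (32)
 *	bytes, i.e. the full 32KB that CACHE_DLIMIT describes.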
 */
ENTRY(arm1022_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags for this space
 */
ENTRY(arm1022_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1022_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1022_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm1022_flush_kern_dcache_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_inv_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_clean_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm1022_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm1022_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm1022_dma_clean_range		@ DMA_TO_DEVICE: clean only
	bcs	arm1022_dma_inv_range		@ DMA_FROM_DEVICE: invalidate
	b	arm1022_dma_flush_range		@ DMA_BIDIRECTIONAL: clean+invalidate
ENDPROC(arm1022_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm1022_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm1022_dma_unmap_area)

ENTRY(arm1022_cache_fns)
	.long	arm1022_flush_icache_all
	.long	arm1022_flush_kern_cache_all
	.long	arm1022_flush_user_cache_all
	.long	arm1022_flush_user_cache_range
	.long	arm1022_coherent_kern_range
	.long	arm1022_coherent_user_range
	.long	arm1022_flush_kern_dcache_area
	.long	arm1022_dma_map_area
	.long	arm1022_dma_unmap_area
	.long	arm1022_dma_flush_range

	.align	5
ENTRY(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1022_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
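 *
 * The ARM1022E caches are virtually indexed and tagged, so the whole
 * D cache is cleaned+invalidated and the I cache invalidated before
 * the new translation table base is loaded; both TLBs are then
 * flushed to drop the old address space's translations.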
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm1022_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#endif /* CONFIG_MMU */
	mov	pc, lr

	__CPUINIT

	.type	__arm1022_setup, #function
__arm1022_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm1022_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R..............
#endif
	mov	pc, lr
	.size	__arm1022_setup, . - __arm1022_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 *
	 */
	.type	arm1022_crval, #object
arm1022_crval:
	crval	clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm1022_processor_functions, #object
arm1022_processor_functions:
	.word	v4t_early_abort
	.word	legacy_pabort
	.word	cpu_arm1022_proc_init
	.word	cpu_arm1022_proc_fin
	.word	cpu_arm1022_reset
	.word	cpu_arm1022_do_idle
	.word	cpu_arm1022_dcache_clean_area
	.word	cpu_arm1022_switch_mm
	.word	cpu_arm1022_set_pte_ext
	.size	arm1022_processor_functions, . - arm1022_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm1022_name, #object
cpu_arm1022_name:
	.asciz	"ARM1022"
	.size	cpu_arm1022_name, . - cpu_arm1022_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm1022_proc_info,#object
__arm1022_proc_info:
	.long	0x4105a220			@ ARM 1022E (v5TE)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm1022_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
	.long	cpu_arm1022_name
	.long	arm1022_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1022_cache_fns
	.size	__arm1022_proc_info, . - __arm1022_proc_info
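
/*
 * Note: the __arm1022_proc_info record above is matched at boot by
 * __lookup_processor_type (arch/arm/kernel/head-common.S), which ANDs
 * the CPU's main ID register with the mask word (0xff0ffff0) and
 * compares the result against the value word (0x4105a220), so any
 * revision of the ARM1022E selects this record.
 */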