/*
 *  linux/arch/arm/mm/proc-arm1020.S: MMU functions for ARM1020
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm1020.
 *
 *  CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/hardware.h>

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1020_proc_init()
 *
 * Nothing to do at init time for this core.
 */
ENTRY(cpu_arm1020_proc_init)
	mov	pc, lr

/*
 * cpu_arm1020_proc_fin()
 *
 * Prepare the CPU for reset/power-down: mask interrupts, flush
 * the caches, then turn the I-cache, D-cache and write buffer off
 * in the CP15 control register.
 */
ENTRY(cpu_arm1020_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip			@ disable IRQ/FIQ, stay in SVC mode
	bl	arm1020_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm1020_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1020_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register (MMU/caches off)
	mov	pc, r0				@ jump to caller-supplied reset vector

/*
 * cpu_arm1020_do_idle()
 *
 * Idle the CPU by issuing the CP15 "wait for interrupt" operation.
 */
	.align	5
ENTRY(cpu_arm1020_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5
/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm1020_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm1020_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	/*
	 * Walk the D-cache by segment/index, cleaning+invalidating each
	 * line.  The write buffer is drained after every line operation;
	 * NOTE(review): this per-line drain appears deliberate for the
	 * ARM1020 (presumably a write-buffer erratum workaround) — confirm
	 * against the ARM1020 errata list before "optimising" it away.
	 */
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for this space
 */
ENTRY(arm1020_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ large ranges: cheaper to do the
	bhs	__flush_whole_cache		@ whole cache (see CACHE_DLIMIT)

#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB before the line loop
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - page	- page aligned address
 */
ENTRY(arm1020_flush_kern_dcache_page)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, #PAGE_SZ		@ r1 = end of the page
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1020_dma_inv_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	/* Partial lines at either boundary are cleaned first so that
	 * data sharing the line with the DMA buffer is not discarded. */
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB (unaligned start only)
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB (unaligned end only)
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm1020_dma_clean_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * Cache operations vector consumed by the generic ARM cache glue;
 * slot order must match struct cpu_cache_fns.
 */
ENTRY(arm1020_cache_fns)
	.long	arm1020_flush_kern_cache_all
	.long	arm1020_flush_user_cache_all
	.long	arm1020_flush_user_cache_range
	.long	arm1020_coherent_kern_range
	.long	arm1020_coherent_user_range
	.long	arm1020_flush_kern_dcache_page
	.long	arm1020_dma_inv_range
	.long	arm1020_dma_clean_range
	.long	arm1020_dma_flush_range

	.align	5
/*
 * cpu_arm1020_dcache_clean_area(addr, size)
 *
 * Clean (write back) 'size' bytes of D-cache starting at 'addr',
 * one line at a time, draining the write buffer after each line.
 */
ENTRY(cpu_arm1020_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE	@ r1 = bytes remaining
	bhi	1b
#endif
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1020_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1020_switch_mm)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	/*
	 * Clean+invalidate the whole D-cache by segment (r1) and
	 * entry (r3), building the CP15 index word in ip each pass.
	 */
	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
	mov	r1, #0xF			@ 16 segments
1:	mov	r3, #0x3F			@ 64 entries
2:	mov	ip, r3, LSL #26			@ shift up entry
	orr	ip, ip, r1, LSL #5		@ shift in/up index
	mcr	p15, 0, ip, c7, c14, 2		@ Clean & Inval DCache entry
	mov	ip, #0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	subs	r3, r3, #1
	cmp	r3, #0
	bge	2b				@ entries 3F to 0
	subs	r1, r1, #1
	cmp	r1, #0
	bge	1b				@ segments 15 to 0

#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr

/*
 * cpu_arm1020_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out.  The Linux-view PTE is stored at
 * ptep, and the hardware-view PTE (small page descriptor derived
 * from the Linux protection bits) is stored 2048 bytes below it.
 */
	.align	5
ENTRY(cpu_arm1020_set_pte)
	str	r1, [r0], #-2048		@ linux version
	/* Invert the "positive" bits so absent bits now test as set. */
	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0				@ no: write a fault entry

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	eor	r3, r1, #0x0a			@ C & small page?
	tst	r3, #0x0b
	biceq	r2, r2, #4			@ clear C bit for write-through
#endif
	str	r2, [r0]			@ hardware version
	mov	r0, r0				@ NOTE(review): deliberate no-op,
						@ presumably a pipeline delay — confirm
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

	__INIT

/*
 * __arm1020_setup: one-time CPU setup run at boot.  Invalidates
 * caches/TLBs and returns the desired CP15 control register value
 * in r0 (clear/set masks below).
 */
	.type	__arm1020_setup, #function
__arm1020_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	ldr	r5, arm1020_cr1_clear
	bic	r0, r0, r5
	ldr	r5, arm1020_cr1_set
	orr	r0, r0, r5
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R.. .... .... ....
#endif
	mov	pc, lr
	.size	__arm1020_setup, . - __arm1020_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .0.1 1001 ..11 0101	/* FIXME: why no V bit? */
	 */
	.type	arm1020_cr1_clear, #object
	.type	arm1020_cr1_set, #object
arm1020_cr1_clear:
	.word	0x593f
arm1020_cr1_set:
	.word	0x1935

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm1020_processor_functions, #object
arm1020_processor_functions:
	.word	v4t_early_abort
	.word	cpu_arm1020_proc_init
	.word	cpu_arm1020_proc_fin
	.word	cpu_arm1020_reset
	.word	cpu_arm1020_do_idle
	.word	cpu_arm1020_dcache_clean_area
	.word	cpu_arm1020_switch_mm
	.word	cpu_arm1020_set_pte
	.size	arm1020_processor_functions, . - arm1020_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	/* Human-readable CPU name; suffixes reflect the cache config
	 * compiled in (i/d caches, write policy, branch prediction,
	 * round-robin replacement). */
	.type	cpu_arm1020_name, #object
cpu_arm1020_name:
	.ascii	"ARM1020"
#ifndef CONFIG_CPU_ICACHE_DISABLE
	.ascii	"i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
	.ascii	"d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	.ascii	"(wt)"
#else
	.ascii	"(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
	.ascii	"B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	.ascii	"RR"
#endif
	.ascii	"\0"
	.size	cpu_arm1020_name, . - cpu_arm1020_name

	.align

	/* Processor-info record matched against the CPU ID at boot. */
	.section ".proc.info", #alloc, #execinstr

	.type	__arm1020_proc_info,#object
__arm1020_proc_info:
	.long	0x4104a200			@ ARM 1020T (Architecture v5T)
	.long	0xff0ffff0			@ CPU ID mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm1020_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm1020_name
	.long	arm1020_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1020_cache_fns
	.size	__arm1020_proc_info, . - __arm1020_proc_info