/*
 *  linux/arch/arm/mm/proc-xsc3.S
 *
 *  Original Author: Matthew Gilbert
 *  Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
 *
 *  Copyright 2004 (C) Intel Corp.
 *  Copyright 2005 (C) MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 * an extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the L1 I, L1 D and unified L2 cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the L1 D cache.
 */
#define CACHESIZE	32768

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor has
 * completed before continuing.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans and invalidates the entire L1 D cache.
 */

	.macro	clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm
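/*
 * In the loop above, the index register encodes the way in bits
 * [31:30] and the set in bits [12:5].  Starting from way 0 of set
 * 255 (0x1fe0), the add of 0x40000000 steps through the four ways
 * (its carry out terminates the inner loop), and the subtract of
 * 0x20 then moves down one set, so the loop covers all
 * CACHESIZE / CACHELINESIZE / 4 = 256 sets x 4 ways x 32-byte
 * lines = the full 32 KiB of L1 D cache.
 */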
	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment.
 */
ENTRY(cpu_xsc3_proc_init)
	mov	pc, lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	bic	r1, r1, #0x0086			@ ........B....CA.
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	mov	pc, r0

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle.
 *
 * For now we do nothing but go to idle mode for every case.
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (may not be aligned)
 * - end	- end address (exclusive, may not be aligned)
 * - vm_flags	- vma->vm_flags field
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the I cache and the D cache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	mov	pc, lr
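/*
 * The recurring three-instruction tail above (invalidate L1 I cache
 * and BTB, data write barrier, prefetch flush) is what makes newly
 * written data visible to instruction fetch: the write barrier
 * drains the write buffer, and the prefetch flush discards any
 * instructions already fetched down the stale path.
 */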
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(xsc3_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
xsc3_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
xsc3_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xsc3_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xsc3_dma_clean_range
	bcs	xsc3_dma_inv_range
	b	xsc3_dma_flush_range
ENDPROC(xsc3_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(xsc3_dma_unmap_area)
	mov	pc, lr
ENDPROC(xsc3_dma_unmap_area)
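/*
 * The direction dispatch in xsc3_dma_map_area above relies on the
 * enum dma_data_direction values: DMA_TO_DEVICE (1) takes the beq
 * to the clean-only path, DMA_FROM_DEVICE (2) sets carry and takes
 * the bcs to the invalidate path, and DMA_BIDIRECTIONAL (0) falls
 * through to the full clean+invalidate flush.
 */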
ENTRY(xsc3_cache_fns)
	.long	xsc3_flush_kern_cache_all
	.long	xsc3_flush_user_cache_all
	.long	xsc3_flush_user_cache_range
	.long	xsc3_coherent_kern_range
	.long	xsc3_coherent_user_range
	.long	xsc3_flush_kern_dcache_area
	.long	xsc3_dma_map_area
	.long	xsc3_dma_unmap_area
	.long	xsc3_dma_flush_range

ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out.
 */
cpu_xsc3_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC (not present?)
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xsc3_set_pte_ext)
	xscale_set_pte_ext_prologue

	tst	r1, #L_PTE_SHARED		@ shared?
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xsc3_mt_table
	ldr	ip, [ip, r1]
	orrne	r2, r2, #PTE_EXT_COHERENT	@ interlock: mask in coherent bit
	bic	r2, r2, #0x0c			@ clear old C,B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr

	.ltorg

	.align

	__CPUINIT

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #1 << 6			@ cp6 access for early sched_clock
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}

#ifdef CONFIG_CACHE_XSC3L2
	mrc	p15, 1, r0, c0, c0, 1		@ get L2 present information
	ands	r0, r0, #0xf8
	orrne	r6, r6, #(1 << 26)		@ enable L2 if present
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
						@ ...I Z..S .... .... (uc)
	mov	pc, lr

	.size	__xsc3_setup, . - __xsc3_setup
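/*
 * xsc3_crval below supplies the control register bits to clear
 * (loaded into r5 by __xsc3_setup above) and the bits to set (r6);
 * the resulting control register value is returned in r0 for the
 * generic startup code to install when it enables the MMU.
 */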
	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_xsc3_proc_init
	.word	cpu_xsc3_proc_fin
	.word	cpu_xsc3_reset
	.word	cpu_xsc3_do_idle
	.word	cpu_xsc3_dcache_clean_area
	.word	cpu_xsc3_switch_mm
	.word	cpu_xsc3_set_pte_ext
	.size	xsc3_processor_functions, . - xsc3_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_xsc3_name, #object
cpu_xsc3_name:
	.asciz	"XScale-V3 based processor"
	.size	cpu_xsc3_name, . - cpu_xsc3_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__xsc3_proc_info,#object
__xsc3_proc_info:
	.long	0x69056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_proc_info, . - __xsc3_proc_info

/* Note: PXA935 changed its implementor ID from Intel to Marvell */

	.type	__xsc3_pxa935_proc_info,#object
__xsc3_pxa935_proc_info:
	.long	0x56056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info
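/*
 * Both proc_info entries above are matched by masking the CPU ID
 * register with the second word (0xffffe000) and comparing the
 * result against the first: the top byte is the implementer code,
 * 0x69 ('i') for Intel and 0x56 ('V') for Marvell.
 */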