/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (C) MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 * an extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the L1 I, L1 D and unified L2 cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the L1 D cache.
 */
#define CACHESIZE	32768

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor was
 * completed before continuing.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans and invalidates the entire L1 D cache.
 */

	.macro	clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm
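/*
 * The loop above walks a set/way index: the way number lives in
 * bits [31:30] (the 0x40000000 add steps through all 4 ways until
 * the carry sets), and the set index lives in bits [12:5] (0x1fe0
 * counts down to 0 in steps of 0x20, i.e. 256 sets).  As a sanity
 * check on the constants: 4 ways * 256 sets * 32 bytes per line
 * = 32768 bytes = CACHESIZE.
 */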
	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment.
 */
ENTRY(cpu_xsc3_proc_init)
	mov	pc, lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xsc3_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	bic	r1, r1, #0x0086			@ ........B....CA.
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing the last two instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	mov	pc, r0

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle.
 *
 * For now we do nothing but go to idle mode for every case.
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 * - vm_flags - vma->vm_flags describing the address space
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the I cache and the D cache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	mov	pc, lr
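/*
 * A typical caller of the coherent range functions above is
 * flush_icache_range(), e.g. after the kernel writes instructions
 * to memory when loading a module: the D side is cleaned first so
 * the new code reaches memory before the I cache and BTB are
 * invalidated.
 */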
/*
 * flush_kern_dcache_page(void *addr)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - page aligned address
 */
ENTRY(xsc3_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(xsc3_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(xsc3_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

ENTRY(xsc3_cache_fns)
	.long	xsc3_flush_kern_cache_all
	.long	xsc3_flush_user_cache_all
	.long	xsc3_flush_user_cache_range
	.long	xsc3_coherent_kern_range
	.long	xsc3_coherent_user_range
	.long	xsc3_flush_kern_dcache_page
	.long	xsc3_dma_inv_range
	.long	xsc3_dma_clean_range
	.long	xsc3_dma_flush_range

ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr
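/*
 * Worked example for dma_inv_range() above: with start = 0x1010 and
 * end = 0x1050 (32-byte lines), both addresses are misaligned, so
 * the lines at 0x1000 and 0x1040 are cleaned first to preserve any
 * unrelated dirty data they hold, and then the lines at 0x1000,
 * 0x1020 and 0x1040 are invalidated.
 */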
/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out.
 */
cpu_xsc3_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC (not present?)
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xsc3_set_pte_ext)
	xscale_set_pte_ext_prologue

	tst	r1, #L_PTE_SHARED		@ shared?
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xsc3_mt_table
	ldr	ip, [ip, r1]
	orrne	r2, r2, #PTE_EXT_COHERENT	@ interlock: mask in coherent bit
	bic	r2, r2, #0x0c			@ clear old C,B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr
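/*
 * Worked example for the table above: a shared writeback page
 * (L_PTE_MT_WRITEBACK with L_PTE_SHARED set) picks up PTE_CACHEABLE |
 * PTE_BUFFERABLE from the table, and the tst/orrne interlock also
 * sets PTE_EXT_COHERENT so the mapping takes part in hardware
 * coherency.
 */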
	.ltorg

	.align

	__INIT

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
						@ ...I Z..S .... .... (uc)
	mov	pc, lr

	.size	__xsc3_setup, . - __xsc3_setup

	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
	.word	v5t_early_abort
	.word	pabort_noifar
	.word	cpu_xsc3_proc_init
	.word	cpu_xsc3_proc_fin
	.word	cpu_xsc3_reset
	.word	cpu_xsc3_do_idle
	.word	cpu_xsc3_dcache_clean_area
	.word	cpu_xsc3_switch_mm
	.word	cpu_xsc3_set_pte_ext
	.size	xsc3_processor_functions, . - xsc3_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_xsc3_name, #object
cpu_xsc3_name:
	.asciz	"XScale-V3 based processor"
	.size	cpu_xsc3_name, . - cpu_xsc3_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__xsc3_proc_info,#object
__xsc3_proc_info:
	.long	0x69056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_proc_info, . - __xsc3_proc_info

/* Note: PXA935 changed its implementor ID from Intel to Marvell */

	.type	__xsc3_pxa935_proc_info,#object
__xsc3_pxa935_proc_info:
	.long	0x56056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info
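/*
 * The .long value/mask pairs at the top of each proc_info entry are
 * matched against the CP15 c0 ID register at boot.  Bits [31:24]
 * hold the implementer code: 0x69 ('i', Intel) for the original
 * parts, 0x56 ('V', Marvell) for the PXA935 as noted above.
 */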