/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 *  Heavily based on proc-arm926.S
 *  Maintainer: Assaf Hoffman <hoffman@marvell.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32

	.bss
	.align 3
__cache_params_loc:
	.space	8

	.text
__cache_params:
	.word	__cache_params_loc

/*
 * cpu_feroceon_proc_init()
 */
ENTRY(cpu_feroceon_proc_init)
	mrc	p15, 0, r0, c0, c0, 1		@ read cache type register
	ldr	r1, __cache_params
	mov	r2, #(16 << 5)
	tst	r0, #(1 << 16)			@ get way
	mov	r0, r0, lsr #18			@ get cache size order
	movne	r3, #((4 - 1) << 30)		@ 4-way
	and	r0, r0, #0xf
	moveq	r3, #0				@ 1-way
	mov	r2, r2, lsl r0			@ actual cache size
	movne	r2, r2, lsr #2			@ turned into # of sets
	sub	r2, r2, #(1 << 5)
	stmia	r1, {r2, r3}
	ret	lr

/*
 * cpu_feroceon_proc_fin()
 */
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0		@ clean L2
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_feroceon_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(feroceon_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(feroceon_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
	.align	5
ENTRY(feroceon_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
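/*
 * __flush_whole_cache (below) walks the D cache by set/way using the two
 * words stored at __cache_params_loc by cpu_feroceon_proc_init() above:
 * the highest set index (pre-shifted by 5, the line size shift) and the
 * highest way index (pre-shifted into bits [31:30]), i.e. exactly the
 * operand format taken by the "clean + invalidate D set/way" operation.
 * For example, a 16 KB 4-way cache with 32-byte lines has 128 sets, so
 * the stored pair is { (128 - 1) << 5 = 0xfe0, (4 - 1) << 30 = 0xc0000000 }.
 */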
ENTRY(feroceon_flush_kern_cache_all)
	mov	r2, #VM_EXEC

__flush_whole_cache:
	ldr	r1, __cache_params
	ldmia	r1, {r1, r3}
1:	orr	ip, r1, r3
2:	mcr	p15, 0, ip, c7, c14, 2		@ clean + invalidate D set/way
	subs	ip, ip, #(1 << 30)		@ next way
	bcs	2b
	subs	r1, r1, #(1 << 5)		@ next set
	bcs	1b

	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
	.align	5
ENTRY(feroceon_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
	.align	5
ENTRY(feroceon_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(feroceon_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
	.align	5
ENTRY(feroceon_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
ENTRY(feroceon_range_flush_kern_dcache_area)
	mrs	r2, cpsr
	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
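
/*
 * The feroceon_range_* functions in this file use Marvell-specific CP15
 * range operations (opcode_1 = 5, CRn = c15): one MCR latches the start
 * address and a second MCR supplies the *inclusive* top address, cleaning
 * and/or invalidating the whole range in one operation.  IRQs are masked
 * around each start/top pair so the two writes cannot be interleaved with
 * another range operation.  These variants are selected through
 * feroceon_range_cache_fns for the CPUs listed at the bottom of this file.
 */
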
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_inv_range:
	mrs	r2, cpsr
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c14, 0		@ D inv range start
	mcr	p15, 5, r1, c15, c14, 1		@ D inv range top
	msr	cpsr_c, r2			@ restore interrupts
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_clean_range:
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c13, 0		@ D clean range start
	mcr	p15, 5, r1, c15, c13, 1		@ D clean range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
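
/*
 * Summary of the DMA helpers above and below: dma_inv_range discards
 * lines without writing them back (partially covered lines at either end
 * are cleaned first so neighbouring data is not lost), dma_clean_range
 * writes dirty lines back without invalidating, and dma_flush_range does
 * both.  feroceon_dma_map_area further down picks one of the three based
 * on the DMA direction: DMA_TO_DEVICE maps to clean, DMA_FROM_DEVICE to
 * invalidate, and DMA_BIDIRECTIONAL to flush.
 */
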
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
	.align	5
ENTRY(feroceon_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
ENTRY(feroceon_range_dma_flush_range)
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(feroceon_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_dma_clean_range
	bcs	feroceon_dma_inv_range
	b	feroceon_dma_flush_range
ENDPROC(feroceon_dma_map_area)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(feroceon_range_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_range_dma_clean_range
	bcs	feroceon_range_dma_inv_range
	b	feroceon_range_dma_flush_range
ENDPROC(feroceon_range_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(feroceon_dma_unmap_area)
	ret	lr
ENDPROC(feroceon_dma_unmap_area)

	.globl	feroceon_flush_kern_cache_louis
	.equ	feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions feroceon

.macro range_alias basename
	.globl feroceon_range_\basename
	.type feroceon_range_\basename , %function
	.equ feroceon_range_\basename , feroceon_\basename
.endm

/*
 * Most of the cache functions are unchanged for this case.
 * Export suitable alias symbols for the unchanged functions:
 */
	range_alias flush_icache_all
	range_alias flush_user_cache_all
	range_alias flush_kern_cache_all
	range_alias flush_kern_cache_louis
	range_alias flush_user_cache_range
	range_alias coherent_kern_range
	range_alias coherent_user_range
	range_alias dma_unmap_area

	define_cache_functions feroceon_range

	.align	5
ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r2, r0
	mov	r3, r1
#endif
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr	p15, 1, r2, c15, c9, 1		@ clean L2 entry
	add	r2, r2, #CACHE_DLINESIZE
	subs	r3, r3, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/* =============================== PageTable ============================== */
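
/*
 * The L1 caches on these CPUs are virtually indexed and tagged, as on
 * other ARMv5 cores, so installing a new set of page tables requires the
 * whole D cache to be cleaned and invalidated (and the I cache and TLBs
 * invalidated) first; cpu_feroceon_switch_mm below reuses
 * __flush_whole_cache for this.
 */
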
/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov	r2, lr				@ abuse r2 to preserve lr
	bl	__flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst	r2, #VM_EXEC
	mcreq	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcreq	p15, 0, ip, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r2
#else
	ret	lr
#endif

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr	p15, 1, r0, c15, c9, 1		@ clean L2 entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl	cpu_feroceon_suspend_size
.equ	cpu_feroceon_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_feroceon_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(cpu_feroceon_do_suspend)

ENTRY(cpu_feroceon_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_feroceon_do_resume)
#endif

	.type	__feroceon_setup, #function
__feroceon_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, feroceon_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__feroceon_setup, . - __feroceon_setup
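
/*
 * The crval macro below (see proc-macros.S) emits a pair of words: a mask
 * of control register bits to clear and the value to set.  __feroceon_setup
 * above loads this pair into r5/r6, applies it to the current control
 * register value and returns the result in r0; the common startup code in
 * head.S then writes r0 to the control register when it enables the MMU.
 */
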
	/*
	 *      B
	 *  R   P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type	feroceon_crval, #object
feroceon_crval:
	crval	clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_feroceon_name, "Feroceon"
	string	cpu_88fr531_name, "Feroceon 88FR531-vd"
	string	cpu_88fr571_name, "Feroceon 88FR571-vd"
	string	cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", "a"

.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__feroceon_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	feroceon_processor_functions
	.long	v4wbi_tlb_fns
	.long	feroceon_user_fns
	.long	\cache
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns
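
/*
 * Each __*_proc_info entry above is matched by the early boot code: the
 * CPU ID read from CP15 is ANDed with cpu_mask and compared against
 * cpu_val, and the first matching entry supplies the section flags, setup
 * function and function tables used for the rest of the boot.
 */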