/*
 * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
 *
 * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
 *
 * Heavily based on proc-arm926.S and proc-xsc3.S
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 */
ENTRY(cpu_mohawk_proc_init)
	mov	pc, lr

/*
 * cpu_mohawk_proc_fin()
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0
ENDPROC(cpu_mohawk_reset)
	.popsection

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	mov	pc, lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(mohawk_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */
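	@ The D cache on this core cannot be cleaned/invalidated for a single
	@ address space, so the user variant simply falls through to the
	@ whole-cache flush below.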

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags describing address space
 *
 * (same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
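	@ dma_map_area() dispatches on the kernel DMA direction: DMA_TO_DEVICE
	@ only needs the range cleaned (CPU writes pushed to memory),
	@ DMA_FROM_DEVICE only needs it invalidated (stale lines discarded),
	@ and DMA_BIDIRECTIONAL takes the combined clean+invalidate path.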
ENTRY(mohawk_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range
	bcs	mohawk_dma_inv_range
	b	mohawk_dma_flush_range
ENDPROC(mohawk_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(mohawk_dma_unmap_area)
	mov	pc, lr
ENDPROC(mohawk_dma_unmap_area)

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions mohawk

ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

.globl	cpu_mohawk_suspend_size
.equ	cpu_mohawk_suspend_size, 4 * 6
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_mohawk_do_suspend)

ENTRY(cpu_mohawk_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_mohawk_do_resume)
#endif

	__CPUINIT

	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr

	.size	__mohawk_setup, . - __mohawk_setup
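
	@ __mohawk_setup leaves the adjusted control register value in r0; the
	@ generic startup code is expected to write it to CP15 c1 once the MMU
	@ can be enabled.  mohawk_crval below supplies the clear/set masks used
	@ in that adjustment.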

	/*
	 * R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
	.long	0xfffff000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__mohawk_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info