/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (C) MontaVista Software, Inc.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 * an extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the L1 I, L1 D and unified L2 cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the L1 D cache.
 */
#define CACHESIZE	32768

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor was
 * completed before continuing with operation.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans and invalidates the entire L1 D cache.
 */

	.macro  clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm

	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xsc3_proc_init)
	ret	lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	bic	r1, r1, #0x0086			@ ........B....CA.
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	ret	r0
ENDPROC(cpu_xsc3_reset)
	.popsection

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
	ret	lr

/* ================================= CACHE ================================ */

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xsc3_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(xsc3_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vma	- vma_area_struct describing address space
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the I cache and the D cache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(xsc3_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xsc3_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xsc3_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xsc3_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xsc3_dma_clean_range
	bcs	xsc3_dma_inv_range
	b	xsc3_dma_flush_range
ENDPROC(xsc3_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(xsc3_dma_unmap_area)
	ret	lr
ENDPROC(xsc3_dma_unmap_area)

	.globl	xsc3_flush_kern_cache_louis
	.equ	xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xsc3

ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
cpu_xsc3_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC (not present?)
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xsc3_set_pte_ext)
	xscale_set_pte_ext_prologue

	tst	r1, #L_PTE_SHARED		@ shared?
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xsc3_mt_table
	ldr	ip, [ip, r1]
	orrne	r2, r2, #PTE_EXT_COHERENT	@ interlock: mask in coherent bit
	bic	r2, r2, #0x0c			@ clear old C,B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	ret	lr

	.ltorg
	.align

.globl	cpu_xsc3_suspend_size
.equ	cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_xsc3_do_suspend)

ENTRY(cpu_xsc3_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_xsc3_do_resume)
#endif

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #1 << 6			@ cp6 access for early sched_clock
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve bit P bit setting
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}

#ifdef CONFIG_CACHE_XSC3L2
	mrc	p15, 1, r0, c0, c0, 1		@ get L2 present information
	ands	r0, r0, #0xf8
	orrne	r6, r6, #(1 << 26)		@ enable L2 if present
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
						@ ...I Z..S .... .... (uc)
	ret	lr

	.size	__xsc3_setup, . - __xsc3_setup

	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_xsc3_name, "XScale-V3 based processor"

	.align

	.section ".proc.info.init", "a"

.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__xsc3_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	xsc3_proc_info xsc3, 0x69056000, 0xffffe000

/* Note: PXA935 changed its implementor ID from Intel to Marvell */
	xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000