/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/cache-v7m.S
 *
 * Based on linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"

.arch armv7-m

/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
        movw    \rt, #:lower16:BASEADDR_V7M_SCB + \reg
        movt    \rt, #:upper16:BASEADDR_V7M_SCB + \reg
        ldr     \rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
        movw\c  \tmp, #:lower16:BASEADDR_V7M_SCB + \op
        movt\c  \tmp, #:upper16:BASEADDR_V7M_SCB + \op
        str\c   \rt, [\tmp]
.endm


.macro read_ccsidr, rt
        v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
        v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro write_csselr, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm
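/*
 * Note: unlike ARMv7-A/R, v7-M has no CP15 coprocessor; every cache
 * maintenance operation is a plain store of its operand (an MVA or a
 * set/way descriptor) to a memory-mapped System Control Block register.
 * As a rough C sketch of what v7m_cacheop expands to (illustrative
 * only; BASEADDR_V7M_SCB and the V7M_SCB_* offsets come from
 * <asm/v7m.h>):
 *
 *	static inline void v7m_cacheop(u32 val, u32 op_offset)
 *	{
 *		writel(val, BASEADDR_V7M_SCB + op_offset);
 *	}
 */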
/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC.
 */
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
        v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
 * \rt data is ignored by ICIALLU(IS), so it can be used for the address
 */
.macro invalidate_icache, rt
        v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
        mov \rt, #0
.endm
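/*
 * The .irp blocks above stamp out one macro per condition-code suffix
 * (dccimvac, dccimvaceq, dccimvacne, ...), so callers can predicate a
 * memory-mapped cache operation just like a native conditional
 * instruction. A sketch of what one generated variant expands to
 * (illustrative only):
 *
 *	dccimvacne r0, r3
 *	@ ... becomes:
 *	movwne	r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCIMVAC
 *	movtne	r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCIMVAC
 *	strne	r0, [r3]
 */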
/*
 * Invalidate the BTB, inner shareable if SMP.
 * \rt data is ignored by BPIALL, so it can be used for the address
 */
.macro invalidate_bp, rt
        v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
        mov \rt, #0
.endm

ENTRY(v7m_invalidate_l1)
        mov     r0, #0

        write_csselr r0, r1
        read_ccsidr r0

        movw    r1, #0x7fff
        and     r2, r1, r0, lsr #13

        movw    r1, #0x3ff

        and     r3, r1, r0, lsr #3      @ NumWays - 1
        add     r2, r2, #1              @ NumSets

        and     r0, r0, #0x7
        add     r0, r0, #4              @ SetShift

        clz     r1, r3                  @ WayShift
        add     r4, r3, #1              @ NumWays
1:      sub     r2, r2, #1              @ NumSets--
        mov     r3, r4                  @ Temp = NumWays
2:      subs    r3, r3, #1              @ Temp--
        mov     r5, r3, lsl r1
        mov     r6, r2, lsl r0
        orr     r5, r5, r6              @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
        dcisw   r5, r6
        bgt     2b
        cmp     r2, #0
        bgt     1b
        dsb     st
        isb
        ret     lr
ENDPROC(v7m_invalidate_l1)

/*
 * v7m_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 * r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
        invalidate_icache r0
        ret     lr
ENDPROC(v7m_flush_icache_all)
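/*
 * What v7m_invalidate_l1 computes from CCSIDR, as a C sketch
 * (illustrative only):
 *
 *	sets = ((ccsidr >> 13) & 0x7fff) + 1;	@ NumSets
 *	ways = ((ccsidr >> 3) & 0x3ff) + 1;	@ Associativity
 *	set_shift = (ccsidr & 7) + 4;		@ log2(line size in bytes)
 *	way_shift = clz(ways - 1);		@ way index lives in the top bits
 *	for (set = sets - 1; set >= 0; set--)
 *		for (way = ways - 1; way >= 0; way--)
 *			DCISW((way << way_shift) | (set << set_shift));
 */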
/*
 * v7m_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: r0-r7, r9-r11
 */
ENTRY(v7m_flush_dcache_all)
        dmb                                     @ ensure ordering with previous memory accesses
        read_clidr r0
        mov     r3, r0, lsr #23                 @ move LoC into position
        ands    r3, r3, #7 << 1                 @ extract LoC*2 from clidr
        beq     finished                        @ if loc is 0, then no need to clean
start_flush_levels:
        mov     r10, #0                         @ start clean at cache level 0
flush_levels:
        add     r2, r10, r10, lsr #1            @ work out 3x current cache level
        mov     r1, r0, lsr r2                  @ extract cache type bits from clidr
        and     r1, r1, #7                      @ mask off the bits for current cache only
        cmp     r1, #2                          @ see what cache we have at this level
        blt     skip                            @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPTION
        save_and_disable_irqs_notrace r9        @ make cssr&csidr read atomic
#endif
        write_csselr r10, r1                    @ set current cache level
        isb                                     @ isb to sync the new cssr&csidr
        read_ccsidr r1                          @ read the new csidr
#ifdef CONFIG_PREEMPTION
        restore_irqs_notrace r9
#endif
        and     r2, r1, #7                      @ extract the length of the cache lines
        add     r2, r2, #4                      @ add 4 (line length offset)
        movw    r4, #0x3ff
        ands    r4, r4, r1, lsr #3              @ find maximum way number (Associativity - 1)
        clz     r5, r4                          @ find bit position of way size increment
        movw    r7, #0x7fff
        ands    r7, r7, r1, lsr #13             @ extract maximum set/index number (NumSets - 1)
loop1:
        mov     r9, r7                          @ create working copy of max index
loop2:
        lsl     r6, r4, r5
        orr     r11, r10, r6                    @ factor way and cache number into r11
        lsl     r6, r9, r2
        orr     r11, r11, r6                    @ factor index number into r11
        dccisw  r11, r6                         @ clean/invalidate by set/way
        subs    r9, r9, #1                      @ decrement the index
        bge     loop2
        subs    r4, r4, #1                      @ decrement the way
        bge     loop1
skip:
        add     r10, r10, #2                    @ increment cache number
        cmp     r3, r10
        bgt     flush_levels
finished:
        mov     r10, #0                         @ switch back to cache level 0
        write_csselr r10, r3                    @ select current cache level in cssr
        dsb     st
        isb
        ret     lr
ENDPROC(v7m_flush_dcache_all)
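/*
 * The level walk above, as a C sketch (illustrative only): CLIDR packs
 * one 3-bit cache-type field per level, and LoC (bits [26:24]) names
 * the first level that needs no cleaning to the point of coherency.
 *
 *	loc = (clidr >> 24) & 7;
 *	for (level = 0; level < loc; level++) {
 *		type = (clidr >> (level * 3)) & 7;
 *		if (type < 2)		@ no cache, or i-cache only
 *			continue;
 *		@ select level in CSSELR, read its CCSIDR, then
 *		@ clean+invalidate every set/way as in the inner loops
 *	}
 */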
/*
 * v7m_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 * The data cache flush is achieved using atomic clean / invalidate
 * operations working outwards from the L1 cache. This is done using
 * Set/Way based cache maintenance instructions.
 * The instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
ENTRY(v7m_flush_kern_cache_all)
        stmfd   sp!, {r4-r7, r9-r11, lr}
        bl      v7m_flush_dcache_all
        invalidate_icache r0
        ldmfd   sp!, {r4-r7, r9-r11, lr}
        ret     lr
ENDPROC(v7m_flush_kern_cache_all)

/*
 * v7m_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v7m_flush_user_cache_all)
        /*FALLTHROUGH*/

/*
 * v7m_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
ENTRY(v7m_flush_user_cache_range)
        ret     lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)
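/*
 * Note: v7-M has no MMU (at most an optional MPU), so there are no
 * per-address-space mappings to maintain; the user cache-flush entry
 * points above can therefore simply return.
 */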
/*
 * v7m_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * v7m_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart        )
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
1:
/*
 * We use an open-coded version of dccmvau, otherwise USER() would
 * point at the movw instruction.
 */
        dccmvau r12, r3
        add     r12, r12, r2
        cmp     r12, r1
        blo     1b
        dsb     ishst
        icache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
2:
        icimvau r12, r3
        add     r12, r12, r2
        cmp     r12, r1
        blo     2b
        invalidate_bp r0
        dsb     ishst
        isb
        ret     lr
 UNWIND(.fnend          )
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)

/*
 * v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v7m_flush_kern_dcache_area)
        dcache_line_size r2, r3
        add     r1, r0, r1
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccimvac r0, r3         @ clean & invalidate D line / unified line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_flush_kern_dcache_area)
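/*
 * The I/D coherency sequence in v7m_coherent_user_range, as a C sketch
 * (illustrative only; dline/iline are the D- and I-cache line sizes):
 *
 *	for (addr = start & ~(dline - 1); addr < end; addr += dline)
 *		DCCMVAU(addr);	@ clean D-cache to point of unification
 *	dsb(ishst);		@ make the cleaned lines visible
 *	for (addr = start & ~(iline - 1); addr < end; addr += iline)
 *		ICIMVAU(addr);	@ invalidate stale I-cache lines
 *	BPIALL();		@ flush the branch predictor
 *	dsb(ishst);
 *	isb();			@ resynchronize instruction fetch
 */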
/*
 * v7m_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
v7m_dma_inv_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        tst     r0, r3
        bic     r0, r0, r3
        dccimvacne r0, r3
        addne   r0, r0, r2
        subne   r3, r2, #1              @ restore r3, corrupted by v7m's dccimvac
        tst     r1, r3
        bic     r1, r1, r3
        dccimvacne r1, r3
        cmp     r0, r1
1:
        dcimvaclo r0, r3
        addlo   r0, r0, r2
        cmplo   r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_inv_range)

/*
 * v7m_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
v7m_dma_clean_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccmvac r0, r3                  @ clean D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_clean_range)

/*
 * v7m_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(v7m_dma_flush_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        dccimvac r0, r3                 @ clean & invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7m_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v7m_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_FROM_DEVICE
        beq     v7m_dma_inv_range
        b       v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)
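/*
 * Edge handling in v7m_dma_inv_range, as a C sketch (illustrative
 * only): lines only partially covered by [start, end) are cleaned and
 * invalidated rather than just invalidated, so unrelated data sharing
 * those lines is written back instead of being thrown away.
 *
 *	if (start & (line - 1))
 *		DCCIMVAC(start & ~(line - 1));	@ partial leading line
 *	if (end & (line - 1))
 *		DCCIMVAC(end & ~(line - 1));	@ partial trailing line
 *	@ whole lines in between: invalidate only
 *	for (addr = first whole line; addr < end & ~(line - 1); addr += line)
 *		DCIMVAC(addr);
 */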
/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v7m_dma_unmap_area)
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        bne     v7m_dma_inv_range
        ret     lr
ENDPROC(v7m_dma_unmap_area)

        .globl  v7m_flush_kern_cache_louis
        .equ    v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all

        __INITDATA

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions v7m
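/*
 * define_cache_functions emits a struct cpu_cache_fns populated with
 * the v7m_* entry points above; the generic ARM cache glue then calls
 * through that table. A sketch of what it expands to, assuming the
 * slot order in proc-macros.S (illustrative only):
 *
 *	ENTRY(v7m_cache_fns)
 *		.long	v7m_flush_icache_all
 *		.long	v7m_flush_kern_cache_all
 *		.long	v7m_flush_kern_cache_louis
 *		.long	v7m_flush_user_cache_all
 *		.long	v7m_flush_user_cache_range
 *		.long	v7m_coherent_kern_range
 *		.long	v7m_coherent_user_range
 *		.long	v7m_flush_kern_dcache_area
 *		.long	v7m_dma_map_area
 *		.long	v7m_dma_unmap_area
 *		.long	v7m_dma_flush_range
 */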