/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/* L2 cache enable bit in the CP15 control register. */
#define CR_L2	(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)

static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	/* Read the L2 cache type register. */
	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}

/* Clean one L2 cache line by MVA. */
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

static inline void xsc3_l2_clean_pa(unsigned long addr)
{
	xsc3_l2_clean_mva(__phys_to_virt(addr));
}

/* Invalidate one L2 cache line by MVA. */
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}

static inline void xsc3_l2_inv_pa(unsigned long addr)
{
	xsc3_l2_inv_mva(__phys_to_virt(addr));
}

static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	/* Walk every set and way of the L2 cache. */
	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}

static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	/* A whole-cache request is cheaper to satisfy by set/way. */
	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		xsc3_l2_clean_pa(start & ~(CACHE_LINE_SIZE - 1));
		xsc3_l2_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (end & (CACHE_LINE_SIZE - 1)) {
		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start != end) {
		xsc3_l2_inv_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		xsc3_l2_clean_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

/*
 * Optimized L2 flush-all: walk the cache by set/way instead of
 * flushing it line by line by address.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}

static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		xsc3_l2_clean_pa(start);
		xsc3_l2_inv_pa(start);
		start += CACHE_LINE_SIZE;
	}

	dsb();
}

static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (!(get_cr() & CR_L2)) {
		pr_info("XScale3 L2 cache enabled.\n");
		adjust_cr(CR_L2, CR_L2);
		xsc3_l2_inv_all();
	}

	/* Register the range operations with the generic outer-cache layer. */
	outer_cache.inv_range = xsc3_l2_inv_range;
	outer_cache.clean_range = xsc3_l2_clean_range;
	outer_cache.flush_range = xsc3_l2_flush_range;

	return 0;
}
core_initcall(xsc3_l2_init);
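
/*
 * Illustrative sketch (not part of the original file): once the hooks
 * above are registered, generic ARM code reaches this L2 cache through
 * the outer_inv_range()/outer_clean_range()/outer_flush_range() wrappers
 * from <asm/cacheflush.h>, which take physical addresses.  The callers
 * below are hypothetical and kept disabled; they only show how the
 * operations map onto a typical DMA streaming pattern.
 */
#if 0
static void example_dma_to_device(unsigned long phys, unsigned long len)
{
	/* Write dirty L2 lines back so the device sees current data. */
	outer_clean_range(phys, phys + len);
}

static void example_dma_from_device(unsigned long phys, unsigned long len)
{
	/* Discard possibly stale L2 lines before the CPU reads the buffer. */
	outer_inv_range(phys, phys + len);
}
#endif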