/*
 * arch/xtensa/include/asm/initialize_mmu.h
 *
 * Initializes MMU:
 *
 * For the new V3 MMU we remap the TLB from virtual == physical
 * to the standard Linux mapping used in earlier MMUs.
 *
 * With the V3 MMU we also support a new configuration register that
 * specifies how the S32C1I instruction operates with the cache
 * controller.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2008 - 2012 Tensilica, Inc.
 *
 *   Marc Gauthier <marc@tensilica.com>
 *   Pete Delaney <piet@tensilica.com>
 */

#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

#include <asm/pgtable.h>
#include <asm/vectors.h>

#ifdef __ASSEMBLY__

#define XTENSA_HWVERSION_RC_2009_0 230000

	.macro	initialize_mmu

#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
 * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
 * For details see Documentation/xtensa/atomctl.txt
 */
#if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
				 * RCW otherwise
				 */
#else
	movi	a3, 0x29	/* non-MX -- Most cores use Std Memory
				 * Controllers which usually can't use RCW
				 */
#endif
	wsr	a3, atomctl
#endif  /* XCHAL_HAVE_S32C1I &&
	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
	 */

#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
 * Have MMU v3
 */

#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif

	movi	a1, 0
	_call0	1f		/* a0 = PC of label 1 (physical, identity-mapped) */
	_j	2f

	.align 4
1:	movi	a2, 0x10000000
	movi	a3, 0x18000000
	add	a2, a2, a0	/* a2 = a0 + 0x10000000 */
9:	bgeu	a2, a3, 9b	/* PC is out of the expected range; spin here */

	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */

	movi	a2, 0x40000006
	idtlb	a2
	iitlb	a2
	isync

	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
	 * and jump to the new mapping.
	 */
#define CA_BYPASS		(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK		(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)

	srli	a3, a0, 27	/* a3 = current PC rounded down to a */
	slli	a3, a3, 27	/*   128MB (way-6 page) boundary */
	addi	a3, a3, CA_BYPASS
	addi	a7, a2, -1	/* a7 = temporary mapping entry; removed in step 5 */
	wdtlb	a3, a7
	witlb	a3, a7
	isync

	slli	a4, a0, 5	/* a4 = offset of PC within the 128MB page, */
	srli	a4, a4, 5	/*   i.e. a0 with the top 5 bits cleared */
	addi	a5, a2, -6	/* a5 = 0x40000000, base of the new mapping */
	add	a4, a4, a5
	jx	a4		/* continue at the same code, new vaddr */

	/* Step 3: unmap everything other than current area.
	 *	   Start at 0x60000000, wrap around, and end with 0x20000000
	 */
2:	movi	a4, 0x20000000
	add	a5, a2, a4		/* a5 = 0x60000000 */
3:	idtlb	a5
	iitlb	a5
	add	a5, a5, a4
	bne	a5, a2, 3b		/* until a5 wraps back to 0x40000006 */

	/* Step 4: Setup MMU with the old V2 mappings. */
	movi	a6, 0x01000000
	wsr	a6, itlbcfg
	wsr	a6, dtlbcfg
	isync

	movi	a5, 0xd0000005		/* kernel cached: 0xd0000000..0xd7ffffff */
	movi	a4, CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, 0xd8000005		/* kernel uncached: 0xd8000000..0xdfffffff */
	movi	a4, CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, 0xe0000006		/* I/O cached: 0xe0000000 -> 0xf0000000 */
	movi	a4, 0xf0000000 + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, 0xf0000006		/* I/O uncached: 0xf0000000 -> 0xf0000000 */
	movi	a4, 0xf0000000 + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

	isync

	/* Jump to self, using MMU v2 mappings. */
	movi	a4, 1f
	jx	a4

1:
	movi	a2, VECBASE_RESET_VADDR
	wsr	a2, vecbase

	/* Step 5: remove temporary mapping (entry saved in a7 in step 2). */
	idtlb	a7
	iitlb	a7
	isync

	movi	a0, 0
	wsr	a0, ptevaddr
	rsync

#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
	  XCHAL_HAVE_SPANNING_WAY */

	.endm

#endif /*__ASSEMBLY__*/

#endif /* _XTENSA_INITIALIZE_MMU_H */