/*
 * We need constants.h for:
 *  VMA_VM_MM
 *  VMA_VM_FLAGS
 *  VM_EXEC
 */
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 * rd: destination register
 * rn: register holding the vma pointer
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * vma_vm_flags - get vma->vm_flags
 * rd: destination register
 * rn: register holding the vma pointer
 */
	.macro	vma_vm_flags, rd, rn
	ldr	\rd, [\rn, #VMA_VM_FLAGS]
	.endm

/*
 * tsk_mm - get task->active_mm from a thread_info pointer
 * rd: destination register
 * rn: register holding the thread_info pointer
 */
	.macro	tsk_mm, rd, rn
	ldr	\rd, [\rn, #TI_TASK]
	ldr	\rd, [\rd, #TSK_ACTIVE_MM]
	.endm

/*
 * act_mm - get current->active_mm
 *
 * Rounds sp down to the base of the kernel stack, where thread_info
 * lives, then dereferences thread_info->task->active_mm.  The two bic
 * instructions together clear the low 13 bits (8128 | 63 == 8191),
 * i.e. they assume an 8KB THREAD_SIZE.
 * NOTE(review): if THREAD_SIZE ever differs from 8192 this mask is
 * wrong — confirm against thread_info.h.
 */
	.macro	act_mm, rd
	bic	\rd, sp, #8128			@ clear bits 12..6 of sp
	bic	\rd, \rd, #63			@ clear bits 5..0 -> thread_info base
	ldr	\rd, [\rd, #TI_TASK]		@ rd = thread_info->task
	ldr	\rd, [\rd, #TSK_ACTIVE_MM]	@ rd = task->active_mm
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 * rd: destination register
 * rn: register holding the mm pointer
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * asid - mask the ASID (low 8 bits) out of the context ID
 * rd: destination register
 * rn: register holding the context ID
 */
	.macro	asid, rd, rn
	and	\rd, \rn, #255
	.endm

/*
 * crval - emit the control register (clear, set) word pair used by the
 * per-CPU setup code.  With an MMU the \mmuset value is used, otherwise
 * the uncached/MMU-less \ucset value; \clear is emitted either way.
 */
	.macro	crval, clear, mmuset, ucset
#ifdef CONFIG_MMU
	.word	\clear
	.word	\mmuset
#else
	.word	\clear
	.word	\ucset
#endif
	.endm

/*
 * dcache_line_size - compute the D-cache line size in bytes from the
 * cache size ID register (available on ARMv7+).  Assumes the cache
 * size selection register was already set up to select the L1 data
 * cache.  (NOTE(review): the ARMv7 names for these are CCSIDR and
 * CSSELR; the original comment called them CSIDR/CSSR.)
 *
 * reg: receives the line size in bytes
 * tmp: scratch register
 */
	.macro	dcache_line_size, reg, tmp
	mrc	p15, 1, \tmp, c0, c0, 0		@ read CCSIDR
	and	\tmp, \tmp, #7			@ cache line size encoding (log2 words - 2)
	mov	\reg, #16			@ size offset: 16 bytes for encoding 0
	mov	\reg, \reg, lsl \tmp		@ actual cache line size in bytes
	.endm


/*
 * Sanity check the PTE configuration for the code below - which makes
 * certain assumptions about how these bits are laid out.
 */
#ifdef CONFIG_MMU
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
     L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif	/* CONFIG_MMU */

/*
 * The ARMv6 and ARMv7 set_pte_ext translation function.
 *
 * Permission translation:
 *  YUWD  APX AP1 AP0	SVC	User
 *  0xxx   0   0   0	no acc	no acc
 *  100x   1   0   1	r/o	no acc
 *  10x0   1   0   1	r/o	no acc
 *  1011   0   0   1	r/w	no acc
 *  110x   0   1   0	r/w	r/o
 *  11x0   0   1   0	r/w	r/o
 *  1111   0   1   1	r/w	r/w
 */

/*
 * armv6_mt_table - emit the memory-type translation table used by
 * armv6_set_pte_ext below.  Indexed by the L_PTE_MT_* field of the
 * Linux PTE; each entry holds the TEX/C/B bits for the hardware PTE.
 * \pfx namespaces the label so each processor file gets its own copy.
 */
	.macro	armv6_mt_table pfx
\pfx\()_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused
	.endm

/*
 * armv6_set_pte_ext - build and store the hardware PTE from the Linux
 * PTE, for ARMv6/v7.
 *
 * On entry (register contract inferred from the code — confirm against
 * the cpu_*_set_pte_ext callers):
 *  r0 = address of the Linux PTE slot
 *  r1 = Linux PTE value
 *  r2 = additional PTE_EXT_* bits supplied by the caller
 * Clobbers r2, r3, ip and the condition flags.
 */
	.macro	armv6_set_pte_ext pfx
	str	r1, [r0], #-2048			@ store linux version; hw PTE lives 2048 bytes below

	bic	r3, r1, #0x000003fc			@ drop Linux-only status bits
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2				@ merge caller's extension bits
	orr	r3, r3, #PTE_EXT_AP0 | 2		@ small page, SVC access

	adr	ip, \pfx\()_mt_table
	and	r2, r1, #L_PTE_MT_MASK
	ldr	r2, [ip, r2]				@ r2 = TEX/C/B bits for this memory type

	@ not both writable and dirty -> read-only (set APX)
	tst	r1, #L_PTE_WRITE
	tstne	r1, #L_PTE_DIRTY
	orreq	r3, r3, #PTE_EXT_APX

	@ user-accessible: set AP1; but user + APX (user r/o) is encoded
	@ as AP1 alone, so strip APX/AP0 in that case (see table above)
	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX
	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0

	tst	r1, #L_PTE_EXEC
	orreq	r3, r3, #PTE_EXT_XN			@ not executable -> execute never

	orr	r3, r3, r2				@ merge memory-type bits

	@ not both present and young -> write a faulting (zero) entry
	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0
	str	r3, [r0]			@ store hardware version
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte: clean D-cache line holding the PTE
	.endm


/*
 * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
 * covering most CPUs except Xscale and Xscale 3.
 *
 * Permission translation:
 *  YUWD   AP	SVC	User
 *  0xxx  0x00	no acc	no acc
 *  100x  0x00	r/o	no acc
 *  10x0  0x00	r/o	no acc
 *  1011  0x55	r/w	no acc
 *  110x  0xaa	r/w	r/o
 *  11x0  0xaa	r/w	r/o
 *  1111  0xff	r/w	r/w
 *
 * Register contract (inferred from the code — confirm against callers):
 *  r0 = address of the Linux PTE slot, r1 = Linux PTE value.
 * Clobbers r2, r3 and the condition flags.
 * \wc_disable: when non-zero and the CPU D-cache is write-through,
 * strip the bufferable bit from cacheable pages (no write-combining).
 */
	.macro	armv3_set_pte_ext wc_disable=1
	str	r1, [r0], #-2048		@ store linux version; hw PTE is 2048 bytes below

	@ r3 = r1 with PRESENT/YOUNG/WRITE/DIRTY inverted (active-low),
	@ so "tst r3, #bit" below tests for the bit being CLEAR in r1
	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL		@ small page type

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW	@ no  -> kernel r/w, user r/o base

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW	@ yes -> grant write access

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0				@ no  -> faulting (zero) entry

	.if	\wc_disable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	@ write-through cache: cacheable+bufferable would mean
	@ write-combining, which these CPUs must not use
	tst	r2, #PTE_CACHEABLE
	bicne	r2, r2, #PTE_BUFFERABLE
#endif
	.endif
	str	r2, [r0]			@ store hardware version
	.endm


/*
 * Xscale set_pte_ext translation, split into two halves to cope
 * with work-arounds. r3 must be preserved by code between these
 * two macros.
 *
 * Permission translation:
 *  YUWD  AP	SVC	User
 *  0xxx  00	no acc	no acc
 *  100x  00	r/o	no acc
 *  10x0  00	r/o	no acc
 *  1011  01	r/w	no acc
 *  110x  10	r/w	r/o
 *  11x0  10	r/w	r/o
 *  1111  11	r/w	r/w
 */
	.macro	xscale_set_pte_ext_prologue
	str	r1, [r0], #-2048		@ store linux version; hw PTE is 2048 bytes below

	@ r3 = r1 with PRESENT/YOUNG/WRITE/DIRTY inverted (active-low);
	@ the epilogue macro relies on r3 surviving unchanged
	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w
	.endm

/*
 * xscale_set_pte_ext_epilogue - second half of the Xscale set_pte_ext
 * translation.  Expects r0 (hw PTE address), r2 (partially built hw
 * PTE) and r3 (inverted status bits from the prologue) to be intact;
 * any per-CPU work-around code runs between the two macros.
 */
	.macro	xscale_set_pte_ext_epilogue
	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0]			@ store hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line holding the PTE
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	.endm