1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Inline assembly cache operations. 7 * 8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net) 9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org) 10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org) 11 */ 12 #ifndef _ASM_R4KCACHE_H 13 #define _ASM_R4KCACHE_H 14 15 #include <linux/stringify.h> 16 17 #include <asm/asm.h> 18 #include <asm/cacheops.h> 19 #include <asm/compiler.h> 20 #include <asm/cpu-features.h> 21 #include <asm/cpu-type.h> 22 #include <asm/mipsmtregs.h> 23 #include <linux/uaccess.h> /* for uaccess_kernel() */ 24 25 extern void (*r4k_blast_dcache)(void); 26 extern void (*r4k_blast_icache)(void); 27 28 /* 29 * This macro return a properly sign-extended address suitable as base address 30 * for indexed cache operations. Two issues here: 31 * 32 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive 33 * the index bits from the virtual address. This breaks with tradition 34 * set by the R4000. To keep unpleasant surprises from happening we pick 35 * an address in KSEG0 / CKSEG0. 36 * - We need a properly sign extended address for 64-bit code. To get away 37 * without ifdefs we let the compiler do it by a type cast. 38 */ 39 #define INDEX_BASE CKSEG0 40 41 #define cache_op(op,addr) \ 42 __asm__ __volatile__( \ 43 " .set push \n" \ 44 " .set noreorder \n" \ 45 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 46 " cache %0, %1 \n" \ 47 " .set pop \n" \ 48 : \ 49 : "i" (op), "R" (*(unsigned char *)(addr))) 50 51 static inline void flush_icache_line_indexed(unsigned long addr) 52 { 53 cache_op(Index_Invalidate_I, addr); 54 } 55 56 static inline void flush_dcache_line_indexed(unsigned long addr) 57 { 58 cache_op(Index_Writeback_Inv_D, addr); 59 } 60 61 static inline void flush_scache_line_indexed(unsigned long addr) 62 { 63 cache_op(Index_Writeback_Inv_SD, addr); 64 } 65 66 static inline void flush_icache_line(unsigned long addr) 67 { 68 switch (boot_cpu_type()) { 69 case CPU_LOONGSON2: 70 cache_op(Hit_Invalidate_I_Loongson2, addr); 71 break; 72 73 default: 74 cache_op(Hit_Invalidate_I, addr); 75 break; 76 } 77 } 78 79 static inline void flush_dcache_line(unsigned long addr) 80 { 81 cache_op(Hit_Writeback_Inv_D, addr); 82 } 83 84 static inline void invalidate_dcache_line(unsigned long addr) 85 { 86 cache_op(Hit_Invalidate_D, addr); 87 } 88 89 static inline void invalidate_scache_line(unsigned long addr) 90 { 91 cache_op(Hit_Invalidate_SD, addr); 92 } 93 94 static inline void flush_scache_line(unsigned long addr) 95 { 96 cache_op(Hit_Writeback_Inv_SD, addr); 97 } 98 99 #define protected_cache_op(op,addr) \ 100 ({ \ 101 int __err = 0; \ 102 __asm__ __volatile__( \ 103 " .set push \n" \ 104 " .set noreorder \n" \ 105 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 106 "1: cache %1, (%2) \n" \ 107 "2: .insn \n" \ 108 " .set pop \n" \ 109 " .section .fixup,\"ax\" \n" \ 110 "3: li %0, %3 \n" \ 111 " j 2b \n" \ 112 " .previous \n" \ 113 " .section __ex_table,\"a\" \n" \ 114 " "STR(PTR)" 1b, 3b \n" \ 115 " .previous" \ 116 : "+r" (__err) \ 117 : "i" (op), "r" (addr), "i" (-EFAULT)); \ 118 __err; \ 119 }) 120 121 122 #define protected_cachee_op(op,addr) \ 123 ({ \ 124 int __err = 0; \ 125 __asm__ __volatile__( \ 126 " .set push \n" \ 127 " .set noreorder \n" \ 128 " .set mips0 \n" \ 129 " .set eva \n" \ 130 "1: cachee %1, (%2) \n" \ 131 "2: .insn \n" \ 132 " .set pop \n" \ 133 " .section .fixup,\"ax\" \n" \ 134 "3: li %0, %3 \n" \ 135 " j 2b \n" \ 136 " .previous \n" \ 137 " .section __ex_table,\"a\" \n" \ 138 " "STR(PTR)" 1b, 3b \n" \ 139 " .previous" \ 140 : "+r" (__err) \ 141 : "i" (op), "r" (addr), "i" (-EFAULT)); \ 142 __err; \ 143 }) 144 145 /* 146 * The next two are for badland addresses like signal trampolines. 147 */ 148 static inline int protected_flush_icache_line(unsigned long addr) 149 { 150 switch (boot_cpu_type()) { 151 case CPU_LOONGSON2: 152 return protected_cache_op(Hit_Invalidate_I_Loongson2, addr); 153 154 default: 155 #ifdef CONFIG_EVA 156 return protected_cachee_op(Hit_Invalidate_I, addr); 157 #else 158 return protected_cache_op(Hit_Invalidate_I, addr); 159 #endif 160 } 161 } 162 163 /* 164 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D 165 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style 166 * caches. We're talking about one cacheline unnecessarily getting invalidated 167 * here so the penalty isn't overly hard. 168 */ 169 static inline int protected_writeback_dcache_line(unsigned long addr) 170 { 171 #ifdef CONFIG_EVA 172 return protected_cachee_op(Hit_Writeback_Inv_D, addr); 173 #else 174 return protected_cache_op(Hit_Writeback_Inv_D, addr); 175 #endif 176 } 177 178 static inline int protected_writeback_scache_line(unsigned long addr) 179 { 180 #ifdef CONFIG_EVA 181 return protected_cachee_op(Hit_Writeback_Inv_SD, addr); 182 #else 183 return protected_cache_op(Hit_Writeback_Inv_SD, addr); 184 #endif 185 } 186 187 /* 188 * This one is RM7000-specific 189 */ 190 static inline void invalidate_tcache_page(unsigned long addr) 191 { 192 cache_op(Page_Invalidate_T, addr); 193 } 194 195 #ifndef CONFIG_CPU_MIPSR6 196 #define cache16_unroll32(base,op) \ 197 __asm__ __volatile__( \ 198 " .set push \n" \ 199 " .set noreorder \n" \ 200 " .set mips3 \n" \ 201 " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \ 202 " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \ 203 " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \ 204 " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \ 205 " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \ 206 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \ 207 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \ 208 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \ 209 " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \ 210 " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \ 211 " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \ 212 " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \ 213 " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \ 214 " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \ 215 " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \ 216 " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \ 217 " .set pop \n" \ 218 : \ 219 : "r" (base), \ 220 "i" (op)); 221 222 #define cache32_unroll32(base,op) \ 223 __asm__ __volatile__( \ 224 " .set push \n" \ 225 " .set noreorder \n" \ 226 " .set mips3 \n" \ 227 " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \ 228 " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \ 229 " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \ 230 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \ 231 " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \ 232 " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \ 233 " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \ 234 " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \ 235 " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \ 236 " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \ 237 " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \ 238 " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \ 239 " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \ 240 " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \ 241 " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \ 242 " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \ 243 " .set pop \n" \ 244 : \ 245 : "r" (base), \ 246 "i" (op)); 247 248 #define cache64_unroll32(base,op) \ 249 __asm__ __volatile__( \ 250 " .set push \n" \ 251 " .set noreorder \n" \ 252 " .set mips3 \n" \ 253 " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \ 254 " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \ 255 " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \ 256 " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \ 257 " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \ 258 " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \ 259 " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \ 260 " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \ 261 " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \ 262 " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \ 263 " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \ 264 " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \ 265 " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \ 266 " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \ 267 " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \ 268 " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \ 269 " .set pop \n" \ 270 : \ 271 : "r" (base), \ 272 "i" (op)); 273 274 #define cache128_unroll32(base,op) \ 275 __asm__ __volatile__( \ 276 " .set push \n" \ 277 " .set noreorder \n" \ 278 " .set mips3 \n" \ 279 " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \ 280 " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \ 281 " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \ 282 " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \ 283 " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \ 284 " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \ 285 " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \ 286 " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \ 287 " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \ 288 " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \ 289 " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \ 290 " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \ 291 " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \ 292 " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \ 293 " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \ 294 " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \ 295 " .set pop \n" \ 296 : \ 297 : "r" (base), \ 298 "i" (op)); 299 300 #else 301 /* 302 * MIPS R6 changed the cache opcode and moved to a 8-bit offset field. 303 * This means we now need to increment the base register before we flush 304 * more cache lines 305 */ 306 #define cache16_unroll32(base,op) \ 307 __asm__ __volatile__( \ 308 " .set push\n" \ 309 " .set noreorder\n" \ 310 " .set mips64r6\n" \ 311 " .set noat\n" \ 312 " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \ 313 " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \ 314 " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \ 315 " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \ 316 " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \ 317 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ 318 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ 319 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ 320 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ 321 " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ 322 " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ 323 " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ 324 " cache %1, 0x060($1); cache %1, 0x070($1)\n" \ 325 " cache %1, 0x080($1); cache %1, 0x090($1)\n" \ 326 " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \ 327 " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \ 328 " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \ 329 " .set pop\n" \ 330 : \ 331 : "r" (base), \ 332 "i" (op)); 333 334 #define cache32_unroll32(base,op) \ 335 __asm__ __volatile__( \ 336 " .set push\n" \ 337 " .set noreorder\n" \ 338 " .set mips64r6\n" \ 339 " .set noat\n" \ 340 " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \ 341 " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ 342 " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ 343 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ 344 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ 345 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 346 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 347 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 348 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 349 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 350 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 351 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 352 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 353 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 354 " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \ 355 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 356 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 357 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 358 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 359 " .set pop\n" \ 360 : \ 361 : "r" (base), \ 362 "i" (op)); 363 364 #define cache64_unroll32(base,op) \ 365 __asm__ __volatile__( \ 366 " .set push\n" \ 367 " .set noreorder\n" \ 368 " .set mips64r6\n" \ 369 " .set noat\n" \ 370 " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ 371 " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ 372 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ 373 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 374 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 375 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 376 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 377 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 378 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 379 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 380 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 381 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 382 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 383 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 384 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 385 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 386 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 387 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 388 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 389 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 390 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 391 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 392 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 393 " .set pop\n" \ 394 : \ 395 : "r" (base), \ 396 "i" (op)); 397 398 #define cache128_unroll32(base,op) \ 399 __asm__ __volatile__( \ 400 " .set push\n" \ 401 " .set noreorder\n" \ 402 " .set mips64r6\n" \ 403 " .set noat\n" \ 404 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 405 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \ 406 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 407 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 408 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 409 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 410 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 411 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 412 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 413 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 414 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 415 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 416 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 417 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 418 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 419 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 420 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 421 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 422 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 423 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 424 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 425 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 426 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 427 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 428 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 429 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 430 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 431 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 432 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 433 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 434 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 435 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \ 436 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \ 437 " .set pop\n" \ 438 : \ 439 : "r" (base), \ 440 "i" (op)); 441 #endif /* CONFIG_CPU_MIPSR6 */ 442 443 /* 444 * Perform the cache operation specified by op using a user mode virtual 445 * address while in kernel mode. 446 */ 447 #define cache16_unroll32_user(base,op) \ 448 __asm__ __volatile__( \ 449 " .set push \n" \ 450 " .set noreorder \n" \ 451 " .set mips0 \n" \ 452 " .set eva \n" \ 453 " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \ 454 " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \ 455 " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \ 456 " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \ 457 " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \ 458 " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \ 459 " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \ 460 " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \ 461 " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \ 462 " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \ 463 " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \ 464 " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \ 465 " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \ 466 " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \ 467 " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \ 468 " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \ 469 " .set pop \n" \ 470 : \ 471 : "r" (base), \ 472 "i" (op)); 473 474 #define cache32_unroll32_user(base, op) \ 475 __asm__ __volatile__( \ 476 " .set push \n" \ 477 " .set noreorder \n" \ 478 " .set mips0 \n" \ 479 " .set eva \n" \ 480 " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \ 481 " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \ 482 " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \ 483 " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \ 484 " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \ 485 " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \ 486 " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \ 487 " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \ 488 " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \ 489 " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \ 490 " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \ 491 " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \ 492 " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \ 493 " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \ 494 " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \ 495 " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \ 496 " .set pop \n" \ 497 : \ 498 : "r" (base), \ 499 "i" (op)); 500 501 #define cache64_unroll32_user(base, op) \ 502 __asm__ __volatile__( \ 503 " .set push \n" \ 504 " .set noreorder \n" \ 505 " .set mips0 \n" \ 506 " .set eva \n" \ 507 " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \ 508 " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \ 509 " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \ 510 " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \ 511 " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \ 512 " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \ 513 " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \ 514 " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \ 515 " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \ 516 " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \ 517 " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \ 518 " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \ 519 " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \ 520 " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \ 521 " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \ 522 " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \ 523 " .set pop \n" \ 524 : \ 525 : "r" (base), \ 526 "i" (op)); 527 528 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ 529 #define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \ 530 static inline void extra##blast_##pfx##cache##lsize(void) \ 531 { \ 532 unsigned long start = INDEX_BASE; \ 533 unsigned long end = start + current_cpu_data.desc.waysize; \ 534 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \ 535 unsigned long ws_end = current_cpu_data.desc.ways << \ 536 current_cpu_data.desc.waybit; \ 537 unsigned long ws, addr; \ 538 \ 539 for (ws = 0; ws < ws_end; ws += ws_inc) \ 540 for (addr = start; addr < end; addr += lsize * 32) \ 541 cache##lsize##_unroll32(addr|ws, indexop); \ 542 } \ 543 \ 544 static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ 545 { \ 546 unsigned long start = page; \ 547 unsigned long end = page + PAGE_SIZE; \ 548 \ 549 do { \ 550 cache##lsize##_unroll32(start, hitop); \ 551 start += lsize * 32; \ 552 } while (start < end); \ 553 } \ 554 \ 555 static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ 556 { \ 557 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \ 558 unsigned long start = INDEX_BASE + (page & indexmask); \ 559 unsigned long end = start + PAGE_SIZE; \ 560 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \ 561 unsigned long ws_end = current_cpu_data.desc.ways << \ 562 current_cpu_data.desc.waybit; \ 563 unsigned long ws, addr; \ 564 \ 565 for (ws = 0; ws < ws_end; ws += ws_inc) \ 566 for (addr = start; addr < end; addr += lsize * 32) \ 567 cache##lsize##_unroll32(addr|ws, indexop); \ 568 } 569 570 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, ) 571 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, ) 572 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, ) 573 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, ) 574 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, ) 575 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_) 576 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, ) 577 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) 578 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) 579 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) 580 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, ) 581 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, ) 582 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, ) 583 584 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) 585 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, ) 586 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, ) 587 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, ) 588 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) 589 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) 590 591 #define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \ 592 static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \ 593 { \ 594 unsigned long start = page; \ 595 unsigned long end = page + PAGE_SIZE; \ 596 \ 597 do { \ 598 cache##lsize##_unroll32_user(start, hitop); \ 599 start += lsize * 32; \ 600 } while (start < end); \ 601 } 602 603 __BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 604 16) 605 __BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) 606 __BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 607 32) 608 __BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) 609 __BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 610 64) 611 __BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) 612 613 /* build blast_xxx_range, protected_blast_xxx_range */ 614 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ 615 static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \ 616 unsigned long end) \ 617 { \ 618 unsigned long lsize = cpu_##desc##_line_size(); \ 619 unsigned long addr = start & ~(lsize - 1); \ 620 unsigned long aend = (end - 1) & ~(lsize - 1); \ 621 \ 622 while (1) { \ 623 prot##cache_op(hitop, addr); \ 624 if (addr == aend) \ 625 break; \ 626 addr += lsize; \ 627 } \ 628 } 629 630 #ifndef CONFIG_EVA 631 632 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) 633 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) 634 635 #else 636 637 #define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \ 638 static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ 639 unsigned long end) \ 640 { \ 641 unsigned long lsize = cpu_##desc##_line_size(); \ 642 unsigned long addr = start & ~(lsize - 1); \ 643 unsigned long aend = (end - 1) & ~(lsize - 1); \ 644 \ 645 if (!uaccess_kernel()) { \ 646 while (1) { \ 647 protected_cachee_op(hitop, addr); \ 648 if (addr == aend) \ 649 break; \ 650 addr += lsize; \ 651 } \ 652 } else { \ 653 while (1) { \ 654 protected_cache_op(hitop, addr); \ 655 if (addr == aend) \ 656 break; \ 657 addr += lsize; \ 658 } \ 659 \ 660 } \ 661 } 662 663 __BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D) 664 __BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I) 665 666 #endif 667 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) 668 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ 669 protected_, loongson2_) 670 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) 671 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , ) 672 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) 673 /* blast_inv_dcache_range */ 674 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) 675 __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , ) 676 677 #endif /* _ASM_R4KCACHE_H */ 678