/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/stringify.h>
#include <asm/compiler.h>

#define ___ssnop \
	sll	$0, $0, 1

#define ___ehb \
	sll	$0, $0, 3

/*
 * TLB hazards
 */
#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
    !defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_LOONGSON3_ENHANCEMENT)

/*
 * MIPSR2 defines ehb for hazard avoidance
 */

#define __mtc0_tlbw_hazard \
	___ehb

#define __mtc0_tlbr_hazard \
	___ehb

#define __tlbw_use_hazard \
	___ehb

#define __tlb_read_hazard \
	___ehb

#define __tlb_probe_hazard \
	___ehb

#define __irq_enable_hazard \
	___ehb

#define __irq_disable_hazard \
	___ehb

#define __back_to_back_c0_hazard \
	___ehb

/*
 * GCC has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are usable
 * only for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code...
 */
#define instruction_hazard() \
do { \
	unsigned long tmp; \
\
	__asm__ __volatile__( \
	"	.set "MIPS_ISA_LEVEL"			\n" \
	"	dla	%0, 1f				\n" \
	"	jr.hb	%0				\n" \
	"	.set	mips0				\n" \
	"1:						\n" \
	: "=r" (tmp)); \
} while (0)

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels
 * will run fine on R2 processors.
 */

#define __mtc0_tlbw_hazard \
	___ssnop; \
	___ssnop; \
	___ehb

#define __mtc0_tlbr_hazard \
	___ssnop; \
	___ssnop; \
	___ehb

#define __tlbw_use_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop; \
	___ehb

#define __tlb_read_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop; \
	___ehb

#define __tlb_probe_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop; \
	___ehb

#define __irq_enable_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop; \
	___ehb

#define __irq_disable_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop; \
	___ehb

#define __back_to_back_c0_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop; \
	___ehb
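/*
 * A hedged usage sketch (illustrative only, not part of the original
 * header): in TLB handling assembly, the fragments above are placed
 * between the coprocessor-0 write and the TLB instruction that depends
 * on it, and again before any use of the new entry.  The unprefixed
 * names are the __ASSEMBLY__ aliases defined near the end of this file;
 * the k0 register usage here is hypothetical.
 *
 *	mtc0	k0, CP0_ENTRYHI		# set up the TLB entry
 *	mtc0_tlbw_hazard		# settle the mtc0 before tlbwi
 *	tlbwi				# write the indexed TLB entry
 *	tlbw_use_hazard			# settle tlbwi before dependent use
 */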
/*
 * GCC has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are usable
 * only for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code...
 */
#define __instruction_hazard() \
do { \
	unsigned long tmp; \
\
	__asm__ __volatile__( \
	"	.set	mips64r2			\n" \
	"	dla	%0, 1f				\n" \
	"	jr.hb	%0				\n" \
	"	.set	mips0				\n" \
	"1:						\n" \
	: "=r" (tmp)); \
} while (0)

#define instruction_hazard() \
do { \
	if (cpu_has_mips_r2_r6) \
		__instruction_hazard(); \
} while (0)

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
	defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || \
	defined(CONFIG_CPU_XLR)

/*
 * R10000 rocks - all hazards are handled in hardware, so this becomes a
 * no-brainer.
 */

#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like R4000 for historical reasons.
 */
#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)
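/*
 * A hedged usage sketch (illustrative only, not part of the original
 * header): after self-modifying code has been written and the caches
 * flushed, instruction_hazard() discards instructions that were already
 * fetched from the old code.  The names "text", "new_insns" and "len"
 * are hypothetical; flush_icache_range() is the standard kernel helper.
 *
 *	memcpy(text, new_insns, len);
 *	flush_icache_range((unsigned long)text, (unsigned long)text + len);
 *	instruction_hazard();	 the pipeline no longer holds stale code
 */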
#else

/*
 * Finally the catchall case for all other processors, including R4000,
 * R4400, R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two-cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single-cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
#define __mtc0_tlbw_hazard \
	nop; \
	nop

#define __mtc0_tlbr_hazard \
	nop; \
	nop

#define __tlbw_use_hazard \
	nop; \
	nop; \
	nop

#define __tlb_read_hazard \
	nop; \
	nop; \
	nop

#define __tlb_probe_hazard \
	nop; \
	nop; \
	nop

#define __irq_enable_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop

#define __irq_disable_hazard \
	nop; \
	nop; \
	nop

#define __back_to_back_c0_hazard \
	___ssnop; \
	___ssnop; \
	___ssnop

#define instruction_hazard() do { } while (0)

#endif


/* FPU hazards */

#if defined(CONFIG_CPU_SB1)

#define __enable_fpu_hazard \
	.set	push; \
	.set	mips64; \
	.set	noreorder; \
	___ssnop; \
	bnezl	$0, .+4; \
	___ssnop; \
	.set	pop

#define __disable_fpu_hazard

#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)

#define __enable_fpu_hazard \
	___ehb

#define __disable_fpu_hazard \
	___ehb

#else

#define __enable_fpu_hazard \
	nop; \
	nop; \
	nop; \
	nop

#define __disable_fpu_hazard \
	___ehb

#endif

#ifdef __ASSEMBLY__

#define _ssnop			___ssnop
#define _ehb			___ehb
#define mtc0_tlbw_hazard	__mtc0_tlbw_hazard
#define mtc0_tlbr_hazard	__mtc0_tlbr_hazard
#define tlbw_use_hazard		__tlbw_use_hazard
#define tlb_read_hazard		__tlb_read_hazard
#define tlb_probe_hazard	__tlb_probe_hazard
#define irq_enable_hazard	__irq_enable_hazard
#define irq_disable_hazard	__irq_disable_hazard
#define back_to_back_c0_hazard	__back_to_back_c0_hazard
#define enable_fpu_hazard	__enable_fpu_hazard
#define disable_fpu_hazard	__disable_fpu_hazard

#else

#define _ssnop() \
do { \
	__asm__ __volatile__( \
	__stringify(___ssnop) \
	); \
} while (0)

#define _ehb() \
do { \
	__asm__ __volatile__( \
	__stringify(___ehb) \
	); \
} while (0)

#define mtc0_tlbw_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__mtc0_tlbw_hazard) \
	); \
} while (0)

#define mtc0_tlbr_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__mtc0_tlbr_hazard) \
	); \
} while (0)

#define tlbw_use_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__tlbw_use_hazard) \
	); \
} while (0)

#define tlb_read_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__tlb_read_hazard) \
	); \
} while (0)

#define tlb_probe_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__tlb_probe_hazard) \
	); \
} while (0)

#define irq_enable_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__irq_enable_hazard) \
	); \
} while (0)

#define irq_disable_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__irq_disable_hazard) \
	); \
} while (0)

#define back_to_back_c0_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__back_to_back_c0_hazard) \
	); \
} while (0)

#define enable_fpu_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__enable_fpu_hazard) \
	); \
} while (0)

#define disable_fpu_hazard() \
do { \
	__asm__ __volatile__( \
	__stringify(__disable_fpu_hazard) \
	); \
} while (0)
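/*
 * A hedged usage sketch (illustrative only, not part of the original
 * header): enabling the FPU from C means setting the CU1 bit in
 * c0_status and then waiting out the mtc0 -> coprocessor-1 hazard
 * before the first FPU instruction is issued.  set_c0_status() and
 * ST0_CU1 come from <asm/mipsregs.h>.
 *
 *	set_c0_status(ST0_CU1);
 *	enable_fpu_hazard();
 *	... FPU instructions may now be issued safely ...
 */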
/*
 * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
 */
extern void mips_ihb(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */