#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
#define __TOOLS_LINUX_SPARC64_BARRIER_H

/* Copied from the kernel sources to tools/:
 *
 * These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in the TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
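
/* A minimal usage sketch, not part of the original header: it illustrates
 * how wmb()/rmb() would pair up to publish a value from one CPU to another.
 * The identifiers below (data, flag, producer, consumer) are hypothetical
 * and kept under #if 0 so they never compile into users of this header.
 */
#if 0
static int data;
static volatile int flag;

static void producer(void)
{
	data = 42;
	wmb();			/* order the data store before the flag store */
	flag = 1;
}

static int consumer(void)
{
	while (!flag)
		;		/* spin until the producer publishes */
	rmb();			/* order the flag load before the data load */
	return data;
}
#endif
/* Under TSO both of these orderings already hold in hardware, so rmb() and
 * wmb() only need to defeat compiler reordering; that is why they expand to
 * empty asm statements above, while mb() alone emits a real membar for the
 * StoreLoad ordering that TSO does not provide.
 */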