/*
 * IRQ flags handling
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__
/*
 * Get definitions for arch_local_save_flags(x), etc.
 */
#include <asm/hw_irq.h>

#else
#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
 * which is the stack frame here, we need to force a stack frame
 * in case we came from user space.
 */
#define TRACE_WITH_FRAME_BUFFER(func)			\
	mflr	r0;					\
	stdu	r1, -STACK_FRAME_OVERHEAD(r1);		\
	std	r0, 16(r1);				\
	stdu	r1, -STACK_FRAME_OVERHEAD(r1);		\
	bl func;					\
	ld	r1, 0(r1);				\
	ld	r1, 0(r1);
#else
#define TRACE_WITH_FRAME_BUFFER(func)			\
	bl func;
#endif

/*
 * These are calls to C code, so the caller must be prepared for volatiles to
 * be clobbered.
 */
#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)

/*
 * This is used by assembly code to soft-disable interrupts first and
 * reconcile irq state.
 *
 * NB: This may call C code, so the caller must be prepared for volatiles to
 * be clobbered.
 */
#define RECONCILE_IRQ_STATE(__rA, __rB)		\
	lbz	__rA,PACASOFTIRQEN(r13);	\
	lbz	__rB,PACAIRQHAPPENED(r13);	\
	cmpwi	cr0,__rA,0;			\
	li	__rA,0;				\
	ori	__rB,__rB,PACA_IRQ_HARD_DIS;	\
	stb	__rB,PACAIRQHAPPENED(r13);	\
	beq	44f;				\
	stb	__rA,PACASOFTIRQEN(r13);	\
	TRACE_DISABLE_INTS;			\
44:

#else
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS

#define RECONCILE_IRQ_STATE(__rA, __rB)		\
	lbz	__rA,PACAIRQHAPPENED(r13);	\
	li	__rB,0;				\
	ori	__rA,__rA,PACA_IRQ_HARD_DIS;	\
	stb	__rB,PACASOFTIRQEN(r13);	\
	stb	__rA,PACAIRQHAPPENED(r13)
#endif
#endif

#endif
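
/*
 * Illustrative sketch only: an assembly exception entry path that pulls in
 * this header with __ASSEMBLY__ defined might reconcile the lazy interrupt
 * state with something like the following, where r10 and r11 are merely
 * example scratch registers, not mandated by the macro:
 *
 *	#include <asm/irqflags.h>
 *	...
 *	RECONCILE_IRQ_STATE(r10, r11)
 *
 * With CONFIG_TRACE_IRQFLAGS this expands to code that marks hard interrupts
 * as disabled in PACAIRQHAPPENED, and, if interrupts were previously
 * soft-enabled, clears PACASOFTIRQEN and calls trace_hardirqs_off() via
 * TRACE_WITH_FRAME_BUFFER; without it, only the two PACA fields are updated.
 */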