xref: /openbmc/linux/arch/x86/include/asm/hardirq.h (revision b04b4f78)
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

/*
 * Per-CPU interrupt statistics, kept cacheline aligned so that CPUs
 * updating their own counters do not false-share with each other.
 */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
#endif
	unsigned int generic_irqs;	/* arch dependent */
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	unsigned int irq_thermal_count;
# ifdef CONFIG_X86_64
	unsigned int irq_threshold_count;
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

/* x86 provides its own per-CPU irq_stat above; skip the generic array */
#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)

#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)

/* likewise, the softirq pending bits live in irq_stat and use percpu ops */
#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

/* defined to themselves so generic code can detect the arch-specific hooks */
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */
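
For context, here is a minimal sketch of how the pieces of this header are typically used: an interrupt handler bumps its per-CPU counter through inc_irq_stat(), and per-CPU totals are produced by summing the fields of irq_cpustat_t. The names example_resched_interrupt(), example_irq_stat_cpu() and the local irq_stats() helper are illustrative only, not part of this header or guaranteed to match the in-tree implementation.

#include <linux/types.h>
#include <linux/percpu.h>
#include <asm/hardirq.h>

/* illustrative helper: pointer to the irq_cpustat_t of a given CPU */
#define irq_stats(cpu)		(&per_cpu(irq_stat, cpu))

/* a handler only needs to bump its own CPU's counter */
static void example_resched_interrupt(void)
{
	inc_irq_stat(irq_resched_count);
}

/* sum of the x86-specific interrupt counts for one CPU */
static u64 example_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
#endif
	sum += irq_stats(cpu)->generic_irqs;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
	sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += irq_stats(cpu)->irq_thermal_count;
# ifdef CONFIG_X86_64
	sum += irq_stats(cpu)->irq_threshold_count;
# endif
#endif
	return sum;
}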