#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
	unsigned int icr_read_retry_count;
#endif
#ifdef CONFIG_HAVE_KVM
	unsigned int kvm_posted_intr_ipis;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	/*
	 * irq_tlb_count is double-counted in irq_call_count, so it must be
	 * subtracted from irq_call_count when displaying irq_call_count
	 */
	unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
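
/*
 * Usage sketch (illustrative, not part of this header): irq_stat is a
 * per-CPU structure, so the local CPU's counters are normally accessed
 * with the this_cpu_*() accessors, e.g.
 *
 *	unsigned int nmis = this_cpu_read(irq_stat.__nmi_count);
 *
 * Cross-CPU readers (such as the /proc/interrupts code) use
 * per_cpu(irq_stat, cpu) instead.
 */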

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
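
/*
 * Usage sketch: an interrupt handler bumps its per-CPU counter with
 * inc_irq_stat(), for example the local APIC timer path does
 *
 *	inc_irq_stat(apic_timer_irqs);
 *
 * which expands to this_cpu_inc(irq_stat.apic_timer_irqs); no atomic is
 * needed because the counter is strictly per-CPU.
 */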

#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	\
		this_cpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
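
/*
 * Usage sketch: with __ARCH_SET_SOFTIRQ_PENDING defined, the generic
 * softirq code uses these per-CPU helpers instead of the default
 * irq_stat-based ones.  Raising softirq nr amounts to roughly
 *
 *	or_softirq_pending(1UL << nr);
 *
 * and the softirq loop snapshots local_softirq_pending(), then clears
 * the mask with set_softirq_pending(0) before handling the bits.
 */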

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat
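
/*
 * Implementation sketch (the real code lives in arch/x86/kernel/irq.c):
 * arch_irq_stat_cpu() folds the per-CPU counters above into the
 * interrupt total reported in /proc/stat, roughly
 *
 *	u64 sum = irq_stats(cpu)->__nmi_count;
 *	sum += irq_stats(cpu)->apic_timer_irqs;
 *	sum += irq_stats(cpu)->irq_call_count;
 *	...
 *
 * while arch_irq_stat() contributes system-wide counts such as
 * irq_err_count.
 */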

#endif /* _ASM_X86_HARDIRQ_H */