/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IDTENTRY_H
#define _ASM_X86_IDTENTRY_H

/* Interrupts/Exceptions */
#include <asm/trapnr.h>

#define IDT_ALIGN	(8 * (1 + HAS_KERNEL_IBT))

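/*
 * Worked example (illustrative only): without CONFIG_X86_KERNEL_IBT,
 * HAS_KERNEL_IBT is 0 and each stub in the vector entry arrays below is
 * padded to 8 bytes; with IBT enabled, HAS_KERNEL_IBT is 1 and the 4-byte
 * ENDBR instruction grows each stub, so the alignment doubles:
 *
 *	IDT_ALIGN = 8 * (1 + 0) =  8	(!CONFIG_X86_KERNEL_IBT)
 *	IDT_ALIGN = 8 * (1 + 1) = 16	(CONFIG_X86_KERNEL_IBT)
 */
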
#ifndef __ASSEMBLY__
#include <linux/entry-common.h>
#include <linux/hardirq.h>

#include <asm/irq_stack.h>

/**
 * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
 *		      No error code pushed by hardware
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Declares three functions:
 * - The ASM entry point: asm_##func
 * - The XEN PV trap entry point: xen_asm_##func (may be unused)
 * - The C handler called from the ASM entry point
 *
 * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says, it
 * declares the entry points for use in C code. There is an ASM variant
 * as well, which is used to emit the entry stubs in entry_32/64.S.
 */
#define DECLARE_IDTENTRY(vector, func)					\
	asmlinkage void asm_##func(void);				\
	asmlinkage void xen_asm_##func(void);				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_IDTENTRY - Emit code for simple IDT entry points
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code with interrupts disabled.
 *
 * The macro is written so that it acts as a function definition. Append
 * the body enclosed in a pair of curly brackets.
 *
 * irqentry_enter() contains common code which has to be invoked before
 * arbitrary code in the body. irqentry_exit() contains common code
 * which has to run before returning to the low-level assembly code.
 */
#define DEFINE_IDTENTRY(func)						\
static __always_inline void __##func(struct pt_regs *regs);		\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	__##func (regs);						\
	instrumentation_end();						\
	irqentry_exit(regs, state);					\
}									\
									\
static __always_inline void __##func(struct pt_regs *regs)

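/*
 * Illustrative usage sketch (not part of this header): a simple exception
 * handler is declared once, e.g. further down in this file, and defined
 * with the matching DEFINE_IDTENTRY() macro elsewhere. The handler body
 * and the helper it calls are hypothetical placeholders:
 *
 *	DECLARE_IDTENTRY(X86_TRAP_OF, exc_overflow);
 *
 *	DEFINE_IDTENTRY(exc_overflow)
 *	{
 *		// Runs with interrupts disabled, between the
 *		// irqentry_enter() and irqentry_exit() emitted by the macro.
 *		handle_overflow(regs);		// hypothetical helper
 *	}
 */
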
/* Special case for 32bit IRET 'trap' */
#define DECLARE_IDTENTRY_SW	DECLARE_IDTENTRY
#define DEFINE_IDTENTRY_SW	DEFINE_IDTENTRY

/**
 * DECLARE_IDTENTRY_ERRORCODE - Declare functions for simple IDT entry points
 *				Error code pushed by hardware
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Declares three functions:
 * - The ASM entry point: asm_##func
 * - The XEN PV trap entry point: xen_asm_##func (may be unused)
 * - The C handler called from the ASM entry point
 *
 * Same as DECLARE_IDTENTRY(), but has an extra error_code argument for the
 * C handler.
 */
#define DECLARE_IDTENTRY_ERRORCODE(vector, func)			\
	asmlinkage void asm_##func(void);				\
	asmlinkage void xen_asm_##func(void);				\
	__visible void func(struct pt_regs *regs, unsigned long error_code)

/**
 * DEFINE_IDTENTRY_ERRORCODE - Emit code for simple IDT entry points
 *			       Error code pushed by hardware
 * @func:	Function name of the entry point
 *
 * Same as DEFINE_IDTENTRY(), but has an extra error_code argument.
 */
#define DEFINE_IDTENTRY_ERRORCODE(func)					\
static __always_inline void __##func(struct pt_regs *regs,		\
				     unsigned long error_code);		\
									\
__visible noinstr void func(struct pt_regs *regs,			\
			    unsigned long error_code)			\
{									\
	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	__##func (regs, error_code);					\
	instrumentation_end();						\
	irqentry_exit(regs, state);					\
}									\
									\
static __always_inline void __##func(struct pt_regs *regs,		\
				     unsigned long error_code)

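/*
 * Illustrative sketch (not part of this header): handlers for exceptions
 * where the CPU pushes an error code, e.g. #GP, receive it as the second
 * argument. The body below is a hypothetical placeholder:
 *
 *	DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
 *	{
 *		// error_code holds the value pushed by the hardware
 *		handle_gp_fault(regs, error_code);	// hypothetical helper
 *	}
 */
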
/**
 * DECLARE_IDTENTRY_RAW - Declare functions for raw IDT entry points
 *		      No error code pushed by hardware
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Maps to DECLARE_IDTENTRY().
 */
#define DECLARE_IDTENTRY_RAW(vector, func)				\
	DECLARE_IDTENTRY(vector, func)

/**
 * DEFINE_IDTENTRY_RAW - Emit code for raw IDT entry points
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code with interrupts disabled.
 *
 * The macro is written so that it acts as a function definition. Append
 * the body enclosed in a pair of curly brackets.
 *
 * Contrary to DEFINE_IDTENTRY(), this does not invoke the
 * irqentry_enter/exit() helpers before and after the body invocation. This
 * needs to be done in the body itself if applicable. Use if extra work
 * is required before the enter/exit() helpers are invoked.
 */
#define DEFINE_IDTENTRY_RAW(func)					\
__visible noinstr void func(struct pt_regs *regs)

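/*
 * Illustrative sketch (not part of this header): a raw entry point does
 * the enter/exit bookkeeping itself, which allows extra work before
 * irqentry_enter(). The body and helper are hypothetical placeholders:
 *
 *	DEFINE_IDTENTRY_RAW(exc_int3)
 *	{
 *		irqentry_state_t state;
 *
 *		// Extra early work may go here, before irqentry_enter().
 *		state = irqentry_enter(regs);
 *		instrumentation_begin();
 *		handle_int3(regs);		// hypothetical helper
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */
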
/**
 * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
 *				    Error code pushed by hardware
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Maps to DECLARE_IDTENTRY_ERRORCODE().
 */
#define DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func)			\
	DECLARE_IDTENTRY_ERRORCODE(vector, func)

/**
 * DEFINE_IDTENTRY_RAW_ERRORCODE - Emit code for raw IDT entry points
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code with interrupts disabled.
 *
 * The macro is written so that it acts as a function definition. Append
 * the body enclosed in a pair of curly brackets.
 *
 * Contrary to DEFINE_IDTENTRY_ERRORCODE(), this does not invoke the
 * irqentry_enter/exit() helpers before and after the body invocation. This
 * needs to be done in the body itself if applicable. Use if extra work
 * is required before the enter/exit() helpers are invoked.
 */
#define DEFINE_IDTENTRY_RAW_ERRORCODE(func)				\
__visible noinstr void func(struct pt_regs *regs, unsigned long error_code)

/**
 * DECLARE_IDTENTRY_IRQ - Declare functions for device interrupt IDT entry
 *			  points (common/spurious)
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Maps to DECLARE_IDTENTRY_ERRORCODE().
 */
#define DECLARE_IDTENTRY_IRQ(vector, func)				\
	DECLARE_IDTENTRY_ERRORCODE(vector, func)

/**
 * DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points
 * @func:	Function name of the entry point
 *
 * The vector number is pushed by the low-level entry stub and handed
 * to the function as the error_code argument, which needs to be truncated
 * to a u8 because the push is sign-extending.
 *
 * irq_enter/exit_rcu() are invoked before the function body and the
 * KVM L1D flush request is set. The function body is run on the interrupt
 * stack if the entry hit kernel mode; the stack switch is handled by
 * run_irq_on_irqstack_cond().
 */
#define DEFINE_IDTENTRY_IRQ(func)					\
static void __##func(struct pt_regs *regs, u32 vector);		\
									\
__visible noinstr void func(struct pt_regs *regs,			\
			    unsigned long error_code)			\
{									\
	irqentry_state_t state = irqentry_enter(regs);			\
	u32 vector = (u32)(u8)error_code;				\
									\
	instrumentation_begin();					\
	kvm_set_cpu_l1tf_flush_l1d();					\
	run_irq_on_irqstack_cond(__##func, regs, vector);		\
	instrumentation_end();						\
	irqentry_exit(regs, state);					\
}									\
									\
static noinline void __##func(struct pt_regs *regs, u32 vector)

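/*
 * Illustrative sketch (not part of this header): common_interrupt, declared
 * further down in this file, is defined with this macro elsewhere in the
 * kernel. Inside the body, @vector is already the masked 8-bit vector
 * number recovered from the sign-extended push. The body shown is a
 * hypothetical placeholder:
 *
 *	DEFINE_IDTENTRY_IRQ(common_interrupt)
 *	{
 *		dispatch_device_irq(regs, vector);	// hypothetical helper
 *	}
 */
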
/**
 * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Declares three functions:
 * - The ASM entry point: asm_##func
 * - The XEN PV trap entry point: xen_asm_##func (may be unused)
 * - The C handler called from the ASM entry point
 *
 * Maps to DECLARE_IDTENTRY().
 */
#define DECLARE_IDTENTRY_SYSVEC(vector, func)				\
	DECLARE_IDTENTRY(vector, func)

/**
 * DEFINE_IDTENTRY_SYSVEC - Emit code for system vector IDT entry points
 * @func:	Function name of the entry point
 *
 * irqentry_enter/exit() and irq_enter/exit_rcu() are invoked before the
 * function body. The KVM L1D flush request is set.
 *
 * Runs the function on the interrupt stack if the entry hit kernel mode.
 */
#define DEFINE_IDTENTRY_SYSVEC(func)					\
static void __##func(struct pt_regs *regs);				\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	kvm_set_cpu_l1tf_flush_l1d();					\
	run_sysvec_on_irqstack_cond(__##func, regs);			\
	instrumentation_end();						\
	irqentry_exit(regs, state);					\
}									\
									\
static noinline void __##func(struct pt_regs *regs)

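/*
 * Illustrative sketch (not part of this header): system vectors declared
 * below, e.g. sysvec_apic_timer_interrupt, are defined with this macro in
 * their respective subsystems. The body shown is a hypothetical placeholder:
 *
 *	DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
 *	{
 *		// Already wrapped in irqentry_enter/exit() and
 *		// irq_enter/exit_rcu() by the macro.
 *		handle_apic_timer(regs);	// hypothetical helper
 *	}
 */
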
/**
 * DEFINE_IDTENTRY_SYSVEC_SIMPLE - Emit code for simple system vector IDT
 *				   entry points
 * @func:	Function name of the entry point
 *
 * Runs the function on the interrupted stack. No switch to the IRQ stack
 * and only the minimal __irq_enter/exit_raw() handling.
 *
 * Only use for 'empty' vectors like the reschedule IPI and the KVM posted
 * interrupt vectors.
 */
#define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func)				\
static __always_inline void __##func(struct pt_regs *regs);		\
									\
__visible noinstr void func(struct pt_regs *regs)			\
{									\
	irqentry_state_t state = irqentry_enter(regs);			\
									\
	instrumentation_begin();					\
	__irq_enter_raw();						\
	kvm_set_cpu_l1tf_flush_l1d();					\
	__##func (regs);						\
	__irq_exit_raw();						\
	instrumentation_end();						\
	irqentry_exit(regs, state);					\
}									\
									\
static __always_inline void __##func(struct pt_regs *regs)

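/*
 * Illustrative sketch (not part of this header): the reschedule IPI
 * declared below is the typical user. Its body is nearly empty, which is
 * why the IRQ stack switch is skipped. The body shown is a hypothetical
 * placeholder:
 *
 *	DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
 *	{
 *		note_reschedule_ipi(regs);	// hypothetical helper
 *	}
 */
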
/**
 * DECLARE_IDTENTRY_XENCB - Declare functions for XEN HV callback entry point
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Declares three functions:
 * - The ASM entry point: asm_##func
 * - The XEN PV trap entry point: xen_asm_##func (may be unused)
 * - The C handler called from the ASM entry point
 *
 * Maps to DECLARE_IDTENTRY(). Distinct entry point to handle the 32/64-bit
 * difference.
 */
#define DECLARE_IDTENTRY_XENCB(vector, func)				\
	DECLARE_IDTENTRY(vector, func)

#ifdef CONFIG_X86_64
/**
 * DECLARE_IDTENTRY_IST - Declare functions for IST handling IDT entry points
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Maps to DECLARE_IDTENTRY_RAW(), but also declares the NOIST C handler,
 * which is called from the ASM entry point on user mode entry.
 */
#define DECLARE_IDTENTRY_IST(vector, func)				\
	DECLARE_IDTENTRY_RAW(vector, func);				\
	__visible void noist_##func(struct pt_regs *regs)

/**
 * DECLARE_IDTENTRY_VC - Declare functions for the VC entry point
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE(), but also declares the kernel
 * and user mode C handlers (kernel_##func and user_##func).
 */
#define DECLARE_IDTENTRY_VC(vector, func)				\
	DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func);			\
	__visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code);	\
	__visible noinstr void   user_##func(struct pt_regs *regs, unsigned long error_code)

/**
 * DEFINE_IDTENTRY_IST - Emit code for IST entry points
 * @func:	Function name of the entry point
 *
 * Maps to DEFINE_IDTENTRY_RAW().
 */
#define DEFINE_IDTENTRY_IST(func)					\
	DEFINE_IDTENTRY_RAW(func)

/**
 * DEFINE_IDTENTRY_NOIST - Emit code for NOIST entry points which
 *			   belong to an IST entry point (MCE, DB)
 * @func:	Function name of the entry point. Must be the same as
 *		the function name of the corresponding IST variant.
 *
 * Maps to DEFINE_IDTENTRY_RAW().
 */
#define DEFINE_IDTENTRY_NOIST(func)					\
	DEFINE_IDTENTRY_RAW(noist_##func)

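/*
 * Illustrative sketch (not part of this header): an IST exception such as
 * #MC pairs an IST handler with a NOIST twin of the same name; the ASM
 * entry selects the NOIST variant when the exception hit user mode. The
 * bodies shown are hypothetical placeholders:
 *
 *	DEFINE_IDTENTRY_IST(exc_machine_check)
 *	{
 *		handle_mce_kernel(regs);	// hypothetical helper
 *	}
 *
 *	DEFINE_IDTENTRY_NOIST(exc_machine_check)
 *	{
 *		handle_mce_user(regs);		// hypothetical helper
 *	}
 */
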
/**
 * DECLARE_IDTENTRY_DF - Declare functions for double fault
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE().
 */
#define DECLARE_IDTENTRY_DF(vector, func)				\
	DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func)

/**
 * DEFINE_IDTENTRY_DF - Emit code for double fault
 * @func:	Function name of the entry point
 *
 * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE().
 */
#define DEFINE_IDTENTRY_DF(func)					\
	DEFINE_IDTENTRY_RAW_ERRORCODE(func)

/**
 * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
 *			       when raised from kernel mode
 * @func:	Function name of the entry point
 *
 * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE().
 */
#define DEFINE_IDTENTRY_VC_KERNEL(func)				\
	DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)

/**
 * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
 *			     when raised from user mode
 * @func:	Function name of the entry point
 *
 * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE().
 */
#define DEFINE_IDTENTRY_VC_USER(func)				\
	DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)

#else	/* CONFIG_X86_64 */

/**
 * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
 * @vector:	Vector number (ignored for C)
 * @func:	Function name of the entry point
 *
 * Declares two functions:
 * - The ASM entry point: asm_##func
 * - The C handler called from the C shim
 */
#define DECLARE_IDTENTRY_DF(vector, func)				\
	asmlinkage void asm_##func(void);				\
	__visible void func(struct pt_regs *regs,			\
			    unsigned long error_code,			\
			    unsigned long address)

/**
 * DEFINE_IDTENTRY_DF - Emit code for double fault on 32bit
 * @func:	Function name of the entry point
 *
 * This is called through the doublefault shim which already provides
 * cr2 in the address argument.
 */
#define DEFINE_IDTENTRY_DF(func)					\
__visible noinstr void func(struct pt_regs *regs,			\
			    unsigned long error_code,			\
			    unsigned long address)

#endif	/* !CONFIG_X86_64 */

/* C-Code mapping */
#define DECLARE_IDTENTRY_NMI		DECLARE_IDTENTRY_RAW
#define DEFINE_IDTENTRY_NMI		DEFINE_IDTENTRY_RAW

#ifdef CONFIG_X86_64
#define DECLARE_IDTENTRY_MCE		DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE		DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE_USER	DEFINE_IDTENTRY_NOIST

#define DECLARE_IDTENTRY_DEBUG		DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG		DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG_USER	DEFINE_IDTENTRY_NOIST
#endif

#else /* !__ASSEMBLY__ */

/*
 * The ASM variants for DECLARE_IDTENTRY*() which emit the ASM entry stubs.
 */
#define DECLARE_IDTENTRY(vector, func)					\
	idtentry vector asm_##func func has_error_code=0

#define DECLARE_IDTENTRY_ERRORCODE(vector, func)			\
	idtentry vector asm_##func func has_error_code=1

/* Special case for 32bit IRET 'trap'. Do not emit ASM code */
#define DECLARE_IDTENTRY_SW(vector, func)

#define DECLARE_IDTENTRY_RAW(vector, func)				\
	DECLARE_IDTENTRY(vector, func)

#define DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func)			\
	DECLARE_IDTENTRY_ERRORCODE(vector, func)

/* Entries for common/spurious (device) interrupts */
#define DECLARE_IDTENTRY_IRQ(vector, func)				\
	idtentry_irq vector func

/* System vector entries */
#define DECLARE_IDTENTRY_SYSVEC(vector, func)				\
	idtentry_sysvec vector func

#ifdef CONFIG_X86_64
# define DECLARE_IDTENTRY_MCE(vector, func)				\
	idtentry_mce_db vector asm_##func func

# define DECLARE_IDTENTRY_DEBUG(vector, func)				\
	idtentry_mce_db vector asm_##func func

# define DECLARE_IDTENTRY_DF(vector, func)				\
	idtentry_df vector asm_##func func

# define DECLARE_IDTENTRY_XENCB(vector, func)				\
	DECLARE_IDTENTRY(vector, func)

# define DECLARE_IDTENTRY_VC(vector, func)				\
	idtentry_vc vector asm_##func func

#else
# define DECLARE_IDTENTRY_MCE(vector, func)				\
	DECLARE_IDTENTRY(vector, func)

/* No ASM emitted for DF as this goes through a C shim */
# define DECLARE_IDTENTRY_DF(vector, func)

/* No ASM emitted for XEN hypervisor callback */
# define DECLARE_IDTENTRY_XENCB(vector, func)

#endif

/* No ASM code emitted for NMI */
#define DECLARE_IDTENTRY_NMI(vector, func)

/*
 * ASM code to emit the common vector entry stubs where each stub is
 * packed into IDT_ALIGN bytes.
 *
 * Note that the 'pushq imm8' is emitted via '.byte 0x6a, vector' because
 * GCC treats the local vector variable as unsigned int and would expand
 * all vectors above 0x7F to a 5-byte push. The original code did an
 * adjustment of the vector number to be in the signed byte range to avoid
 * this. While clever, it's mind-bogglingly counterintuitive and requires the
 * odd conversion back to a real vector number in the C entry points. Using
 * .byte achieves the same thing and the only fixup needed in the C entry
 * point is to mask off the bits above bit 7 because the push is
 * sign-extending.
 */
	.align IDT_ALIGN
SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept NR_EXTERNAL_VECTORS
	UNWIND_HINT_IRET_REGS
0 :
	ENDBR
	.byte	0x6a, vector
	jmp	asm_common_interrupt
	/* Ensure that the above is IDT_ALIGN bytes max */
	.fill 0b + IDT_ALIGN - ., 1, 0xcc
	vector = vector+1
    .endr
SYM_CODE_END(irq_entries_start)
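
/*
 * Worked example for the encoding above (illustrative only): vector 0xec
 * is emitted as '.byte 0x6a, 0xec', i.e. 'push $-0x14', so the stack slot
 * read as error_code contains the sign-extended value 0xffffffffffffffec.
 * The C entry point recovers the vector with the masking shown in
 * DEFINE_IDTENTRY_IRQ():
 *
 *	u32 vector = (u32)(u8)error_code;	// 0xffffffffffffffec -> 0xec
 */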

#ifdef CONFIG_X86_LOCAL_APIC
	.align IDT_ALIGN
SYM_CODE_START(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept NR_SYSTEM_VECTORS
	UNWIND_HINT_IRET_REGS
0 :
	ENDBR
	.byte	0x6a, vector
	jmp	asm_spurious_interrupt
	/* Ensure that the above is IDT_ALIGN bytes max */
	.fill 0b + IDT_ALIGN - ., 1, 0xcc
	vector = vector+1
    .endr
SYM_CODE_END(spurious_entries_start)
#endif

#endif /* __ASSEMBLY__ */

/*
 * The actual entry points. Note that DECLARE_IDTENTRY*() serves two
 * purposes:
 *  - provide the function declarations when included from C-Code
 *  - emit the ASM stubs when included from entry_32/64.S
 *
 * This avoids duplicate defines and ensures that everything is consistent.
 */

/*
 * Dummy trap number so the low-level ASM macro vector number checks do not
 * match, which results in emitting plain IDTENTRY stubs without bells and
 * whistles.
 */
#define X86_TRAP_OTHER		0xFFFF

/* Simple exception entry points. No hardware error code */
DECLARE_IDTENTRY(X86_TRAP_DE,		exc_divide_error);
DECLARE_IDTENTRY(X86_TRAP_OF,		exc_overflow);
DECLARE_IDTENTRY(X86_TRAP_BR,		exc_bounds);
DECLARE_IDTENTRY(X86_TRAP_NM,		exc_device_not_available);
DECLARE_IDTENTRY(X86_TRAP_OLD_MF,	exc_coproc_segment_overrun);
DECLARE_IDTENTRY(X86_TRAP_SPURIOUS,	exc_spurious_interrupt_bug);
DECLARE_IDTENTRY(X86_TRAP_MF,		exc_coprocessor_error);
DECLARE_IDTENTRY(X86_TRAP_XF,		exc_simd_coprocessor_error);

/* 32bit software IRET trap. Do not emit ASM code */
DECLARE_IDTENTRY_SW(X86_TRAP_IRET,	iret_error);

/* Simple exception entries with error code pushed by hardware */
DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_TS,	exc_invalid_tss);
DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_NP,	exc_segment_not_present);
DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_SS,	exc_stack_segment);
DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_GP,	exc_general_protection);
DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_AC,	exc_alignment_check);

/* Raw exception entries which need extra work */
DECLARE_IDTENTRY_RAW(X86_TRAP_UD,		exc_invalid_op);
DECLARE_IDTENTRY_RAW(X86_TRAP_BP,		exc_int3);
DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF,	exc_page_fault);

#ifdef CONFIG_X86_MCE
#ifdef CONFIG_X86_64
DECLARE_IDTENTRY_MCE(X86_TRAP_MC,	exc_machine_check);
#else
DECLARE_IDTENTRY_RAW(X86_TRAP_MC,	exc_machine_check);
#endif
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW(X86_TRAP_MC,	xenpv_exc_machine_check);
#endif
#endif

/* NMI */

#if IS_ENABLED(CONFIG_KVM_INTEL)
/*
 * Special entry point for VMX which invokes this on the kernel stack, even for
 * 64-bit, i.e. without using an IST.  asm_exc_nmi() requires an IST to work
 * correctly vs. the NMI 'executing' marker.  Used for 32-bit kernels as well
 * to avoid more ifdeffery.
 */
DECLARE_IDTENTRY(X86_TRAP_NMI,		exc_nmi_kvm_vmx);
#endif

DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,	exc_nmi);
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,	xenpv_exc_nmi);
#endif

/* #DB */
#ifdef CONFIG_X86_64
DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB,	exc_debug);
#else
DECLARE_IDTENTRY_RAW(X86_TRAP_DB,	exc_debug);
#endif
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW(X86_TRAP_DB,	xenpv_exc_debug);
#endif

/* #DF */
DECLARE_IDTENTRY_DF(X86_TRAP_DF,	exc_double_fault);
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_DF,	xenpv_exc_double_fault);
#endif

/* #CP */
#ifdef CONFIG_X86_KERNEL_IBT
DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_CP,	exc_control_protection);
#endif

/* #VC */
#ifdef CONFIG_AMD_MEM_ENCRYPT
DECLARE_IDTENTRY_VC(X86_TRAP_VC,	exc_vmm_communication);
#endif

#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER,	exc_xen_hypervisor_callback);
DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER,	exc_xen_unknown_trap);
#endif

#ifdef CONFIG_INTEL_TDX_GUEST
DECLARE_IDTENTRY(X86_TRAP_VE,		exc_virtualization_exception);
#endif

/* Device interrupts common/spurious */
DECLARE_IDTENTRY_IRQ(X86_TRAP_OTHER,	common_interrupt);
#ifdef CONFIG_X86_LOCAL_APIC
DECLARE_IDTENTRY_IRQ(X86_TRAP_OTHER,	spurious_interrupt);
#endif

/* System vector entry points */
#ifdef CONFIG_X86_LOCAL_APIC
DECLARE_IDTENTRY_SYSVEC(ERROR_APIC_VECTOR,		sysvec_error_interrupt);
DECLARE_IDTENTRY_SYSVEC(SPURIOUS_APIC_VECTOR,		sysvec_spurious_apic_interrupt);
DECLARE_IDTENTRY_SYSVEC(LOCAL_TIMER_VECTOR,		sysvec_apic_timer_interrupt);
DECLARE_IDTENTRY_SYSVEC(X86_PLATFORM_IPI_VECTOR,	sysvec_x86_platform_ipi);
#endif

#ifdef CONFIG_SMP
DECLARE_IDTENTRY(RESCHEDULE_VECTOR,			sysvec_reschedule_ipi);
DECLARE_IDTENTRY_SYSVEC(IRQ_MOVE_CLEANUP_VECTOR,	sysvec_irq_move_cleanup);
DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR,			sysvec_reboot);
DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR,	sysvec_call_function_single);
DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR,		sysvec_call_function);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
# ifdef CONFIG_X86_MCE_THRESHOLD
DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR,		sysvec_threshold);
# endif

# ifdef CONFIG_X86_MCE_AMD
DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR,		sysvec_deferred_error);
# endif

# ifdef CONFIG_X86_THERMAL_VECTOR
DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR,		sysvec_thermal);
# endif

# ifdef CONFIG_IRQ_WORK
DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR,		sysvec_irq_work);
# endif
#endif

#ifdef CONFIG_HAVE_KVM
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR,		sysvec_kvm_posted_intr_ipi);
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR,	sysvec_kvm_posted_intr_wakeup_ipi);
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR,	sysvec_kvm_posted_intr_nested_ipi);
#endif

#if IS_ENABLED(CONFIG_HYPERV)
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_hyperv_callback);
DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR,	sysvec_hyperv_reenlightenment);
DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR,		sysvec_hyperv_stimer0);
#endif

#if IS_ENABLED(CONFIG_ACRN_GUEST)
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_acrn_hv_callback);
#endif

#ifdef CONFIG_XEN_PVHVM
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_xen_hvm_callback);
#endif

#ifdef CONFIG_KVM_GUEST
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_kvm_asyncpf_interrupt);
#endif

#undef X86_TRAP_OTHER

#endif