/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif


struct task_struct;

/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 *
 * Three mutually exclusive configurations:
 *  - CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: accounting is always on.
 *  - CONFIG_VIRT_CPU_ACCOUNTING_GEN: enablement is tied to context tracking
 *    state and must be queried per-CPU.
 *  - neither: everything is a no-op stub.
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)

/* Native accounting is unconditional on this config, hence constant true. */
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}

/* Same check, narrowed to one specific CPU. */
static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	return context_tracking_enabled_cpu(cpu);
}

/* Same check, for the CPU we are currently running on. */
static inline bool vtime_accounting_enabled_this_cpu(void)
{
	return context_tracking_enabled_this_cpu();
}

extern void vtime_task_switch_generic(struct task_struct *prev);

/*
 * Context-switch hook: forward @prev to the generic vtime switch path,
 * but only if generic vtime accounting is enabled on this CPU.
 */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_task_switch_generic(prev);
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

/* No vtime accounting configured: predicates are false, hooks are no-ops. */
static inline bool vtime_accounting_enabled_cpu(int cpu) {return false; }
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

#endif

/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_account_kernel(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
/*
 * Only vtime_account_kernel() gets a stub here; vtime_account_idle() is
 * presumably never referenced without CONFIG_VIRT_CPU_ACCOUNTING — the
 * asymmetry looks deliberate, do not "fix" it without checking callers.
 */
static inline void vtime_account_kernel(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_kernel(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

/*
 * IRQ entry hook: feeds both accounting backends; each call collapses to
 * a no-op when its respective config option is off.
 */
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}

/* IRQ exit hook: mirror of account_irq_enter_time(). */
static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}

#endif /* _LINUX_KERNEL_VTIME_H */