/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * Interface for KVM's emulated ARM PMUv3: per-vCPU counter state plus the
 * entry points the rest of KVM calls to drive it. When the kernel is built
 * without CONFIG_HW_PERF_EVENTS, every entry point degrades to a no-op stub.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

/* The cycle counter occupies the highest counter index. */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
/* Number of counter pairs; the "+ 1" rounds up for an odd counter count. */
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

/*
 * True when the kvm_arm_pmu_available static key has been enabled
 * (i.e. the host exposes a usable PMU to KVM).
 */
static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#ifdef CONFIG_HW_PERF_EVENTS

/* One emulated PMU counter. */
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* host perf event backing this counter, if any */
};

struct kvm_pmu {
	/*
	 * Interrupt number for counter overflow; considered initialized
	 * once it is >= VGIC_NR_SGIS (see kvm_arm_pmu_irq_initialized()).
	 */
	int irq_num;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	/* One bit per counter pair, set when that pair is chained. */
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
	bool created;		/* NOTE(review): presumably set once device setup completes — confirm */
	bool irq_level;		/* cached overflow interrupt line level */
	struct irq_work overflow_work;	/* deferred work for overflow handling */
};

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
/* KVM_ARM_VCPU_PMU_V3 device attribute accessors. */
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
/*
 * Stubs for kernels built without CONFIG_HW_PERF_EVENTS: counters read as
 * zero, state transitions are no-ops, and the device attribute accessors
 * report -ENXIO (no such device attribute).
 */
struct kvm_pmu {
};

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#endif

#endif