/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

/* The cycle counter occupies the highest counter index. */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
/*
 * Counters can be chained in even/odd pairs; the +1 rounds up so an odd
 * maximum still yields enough pairs to cover every counter.
 */
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

#ifdef CONFIG_KVM_ARM_PMU

/* Per-counter state: guest counter index plus its backing host perf event. */
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* NULL when no host event is active */
};

/* Per-vcpu virtual PMU state. */
struct kvm_pmu {
	int irq_num;	/* interrupt number for counter overflow delivery */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	/* One bit per even/odd counter pair that is configured as chained. */
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
	bool ready;	/* PMU is set up and usable by the guest */
	bool created;	/* userspace has finished PMU device creation */
	bool irq_level;	/* cached level of the overflow interrupt line */
};

#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
/*
 * irq_num below VGIC_NR_SGIS means "not yet configured by userspace".
 * NOTE(review): assumes VGIC_NR_SGIS is visible via an earlier include in
 * every user of this header — confirm.
 */
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
/*
 * CONFIG_KVM_ARM_PMU disabled: empty state plus no-op stubs so callers
 * compile unchanged. Counter reads return 0, attr ops return -ENXIO.
 */
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

#endif