/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_KVM_ARM_PMU

struct kvm_pmc {
	u8 idx;				/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* backing host perf event, if any */
	u64 bitmask;			/* mask reflecting the counter's width */
};

struct kvm_pmu {
	int irq_num;			/* interrupt injected on counter overflow */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	bool ready;			/* PMU fully initialized and usable */
	bool irq_level;			/* current level of the overflow interrupt */
};

#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
#else
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
#endif	/* CONFIG_KVM_ARM_PMU */

#endif	/* __ASM_ARM_KVM_PMU_H */
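/*
 * Illustration (a sketch, not part of this header or the kernel API
 * surface): kvm_pmu_flush_hwstate() and kvm_pmu_sync_hwstate() are meant
 * to bracket guest execution, so a run loop would drive them roughly as
 * below. example_vcpu_run() and enter_guest() are hypothetical names used
 * only for this sketch; the real caller is KVM's vcpu run path.
 *
 *	static void example_vcpu_run(struct kvm_vcpu *vcpu)
 *	{
 *		// Propagate the emulated PMU overflow interrupt state
 *		// into the guest's interrupt controller before entry.
 *		kvm_pmu_flush_hwstate(vcpu);
 *
 *		enter_guest(vcpu);	// hypothetical guest entry point
 *
 *		// Fold back any overflow that occurred while the
 *		// guest was running.
 *		kvm_pmu_sync_hwstate(vcpu);
 *	}
 */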