// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>

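/*
 * Per-CPU bookkeeping of which PMU counters should only count while the
 * host runs (events_host) and which should only count while a guest runs
 * (events_guest). The bitmaps are updated through
 * kvm_set_pmu_events()/kvm_clr_pmu_events() below when the host programs
 * its perf events, and consumed when switching between host and guest.
 */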
static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

/*
 * Given the perf event attributes and system type, determine
 * if we are going to need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2.
	 * If user (EL0) is excluded from the event, there is no reason
	 * to switch counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if attributes are different */
	return (attr->exclude_host != attr->exclude_guest);
}

struct kvm_pmu_events *kvm_get_pmu_events(void)
{
	return this_cpu_ptr(&kvm_pmu_events);
}

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		pmu->events_host |= set;
	if (!attr->exclude_guest)
		pmu->events_guest |= set;
}

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3())
		return;

	pmu->events_host &= ~clr;
	pmu->events_guest &= ~clr;
}

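/*
 * Illustrative (hypothetical) usage from the host PMU driver when it
 * programs or releases a hardware counter 'idx' for a perf event with
 * attributes 'attr':
 *
 *	kvm_set_pmu_events(BIT(idx), attr);	// counter programmed
 *	kvm_clr_pmu_events(BIT(idx));		// counter released
 */

/*
 * The PMEVTYPER<n>_EL0 register name is encoded in the mrs/msr
 * instruction itself, so the counter index cannot be applied at run
 * time; expand one switch case per possible counter instead.
 */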
#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)

/*
 * Read a value directly from PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}

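/*
 * The 'events' argument to the two helpers below is a bitmap of counter
 * indices: bits 0-30 select PMEVTYPER<n>_EL0 and bit 31
 * (ARMV8_PMU_CYCLE_IDX) selects the cycle counter filter, PMCCFILTR_EL0.
 */
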
/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 *
 * On nVHE there is nothing to do here: the host/guest event switching
 * is performed by the hyp code at every guest entry/exit.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	preempt_disable();
	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}

/*
 * With VHE, keep track of the PMUSERENR_EL0 value for the host EL0 on the pCPU
 * where PMUSERENR_EL0 for the guest is loaded, since PMUSERENR_EL0 is switched
 * to the value for the guest on vcpu_load().  The value for the host EL0
 * will be restored on vcpu_put(), before returning to userspace.
 * This isn't necessary for nVHE, as the register is context switched for
 * every guest enter/exit.
 *
 * Return true if KVM takes care of the register. Otherwise return false.
 */
bool kvm_set_pmuserenr(u64 val)
{
	struct kvm_cpu_context *hctxt;
	struct kvm_vcpu *vcpu;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return false;

	vcpu = kvm_get_running_vcpu();
	if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
		return false;

	hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
	return true;
}
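
/*
 * Illustrative (hypothetical) caller pattern, assuming the host PMU
 * driver routes its PMUSERENR_EL0 updates through the helper above:
 *
 *	if (!kvm_set_pmuserenr(val))
 *		write_sysreg(val, pmuserenr_el0);
 *
 * i.e. when KVM has stashed the value in the host context, the caller
 * skips the direct system register write.
 */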

/*
 * If we interrupted the guest to update the host PMU context, make
 * sure we re-apply the guest EL0 state.
 */
void kvm_vcpu_pmu_resync_el0(void)
{
	struct kvm_vcpu *vcpu;

	if (!has_vhe() || !in_interrupt())
		return;

	vcpu = kvm_get_running_vcpu();
	if (!vcpu)
		return;

	kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
}