xref: /openbmc/linux/arch/x86/kvm/pmu.c (revision 103af0a9)
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"

static struct kvm_arch_event_perf_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
	bool inexact;
} arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * Mapping between fixed pmc index and arch_events array: fixed counter
 * 0 counts retired instructions (arch_events[1]), fixed counter 1
 * counts unhalted core cycles (arch_events[0]) and fixed counter 2
 * counts unhalted reference cycles (arch_events[7]).
 */
int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

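/*
 * Return the bitmask matching the width of the counter, as enumerated
 * to the guest in CPUID 0xA; counter values wrap at this mask.
 */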
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	return pmu->counter_bitmask[pmc->type];
}

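/* A counter counts only while its enable bit in IA32_PERF_GLOBAL_CTRL is set. */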
static inline bool pmc_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

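/*
 * Map an MSR in [base, base + nr_arch_gp_counters) to its
 * general-purpose counter; base is MSR_IA32_PERFCTR0 for the counter
 * MSRs and MSR_P6_EVNTSEL0 for the event select MSRs.
 */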
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

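/*
 * Global counter indices follow the IA32_PERF_GLOBAL_CTRL bit layout:
 * general-purpose counters start at bit 0, fixed counters at bit
 * INTEL_PMC_IDX_FIXED.
 */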
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
	else
		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

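/*
 * irq_work callback, used when the overflow NMI cannot deliver the PMI
 * itself; see the comment in kvm_perf_overflow_intr() below.
 */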
static void trigger_pmi(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
			irq_work);
	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
			arch.pmu);

	kvm_deliver_pmi(vcpu);
}

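/*
 * Host perf overflow callback for a guest counter programmed without
 * interrupt on overflow: set the counter's bit in the guest's
 * IA32_PERF_GLOBAL_STATUS, but inject no PMI.
 */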
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
		struct perf_sample_data *data, struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		kvm_perf_overflow(perf_event, data, regs);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
		/*
		 * Inject the PMI. If the vcpu was in guest mode when the
		 * NMI arrived, the PMI can be injected on the next guest
		 * mode entry. Otherwise we cannot be sure the vcpu was not
		 * executing a hlt instruction at the time of the vmexit,
		 * in which case it will not re-enter guest mode until it
		 * is woken up. Waking it is impossible from NMI context,
		 * so do it from irq work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

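/*
 * The architectural counter value is the last value software wrote to
 * the MSR plus whatever the backing host perf event has counted since,
 * wrapped to the counter's width.
 */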
static u64 read_pmc(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;

	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	/* FIXME: Scaling needed? */

	return counter & pmc_bitmask(pmc);
}

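/*
 * Snapshot the current value into pmc->counter and release the backing
 * host perf event; the counter keeps its architectural value while no
 * event is attached.
 */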
static void stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = read_pmc(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

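/*
 * Create the host perf event backing a guest counter. The event is
 * pinned and, via exclude_host, counts only while the guest is
 * running.
 */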
static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
		unsigned config, bool exclude_user, bool exclude_kernel,
		bool intr, bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

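	/*
	 * The host event must overflow exactly when the guest counter
	 * wraps. For example, with a 48-bit counter currently at
	 * 0xfffffffffffe, (-pmc->counter) & pmc_bitmask(pmc) yields a
	 * sample period of 2: two more increments reach the wrap.
	 */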
	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm: pmu event creation failed %ld\n",
				PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc->vcpu->arch.pmu.reprogram_pmi);
}

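/*
 * Translate an eventsel/unit_mask pair into a generic perf hardware
 * event. Returns PERF_COUNT_HW_MAX if the pair is not one of the
 * architectural events above, or if the guest's CPUID 0xA.EBX marks
 * the event as unavailable.
 */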
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
		u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
		if (arch_events[i].eventsel == event_select
				&& arch_events[i].unit_mask == unit_mask
				&& (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(arch_events))
		return PERF_COUNT_HW_MAX;

	return arch_events[i].event_type;
}

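/*
 * Program a general-purpose counter from a guest write to its event
 * select MSR. Architectural events that perf knows generically are
 * forwarded as PERF_TYPE_HARDWARE; everything else is passed through
 * as a PERF_TYPE_RAW event.
 */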
static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
				unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	reprogram_counter(pmc, type, config,
			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			eventsel & ARCH_PERFMON_EVENTSEL_INT,
			(eventsel & HSW_IN_TX),
			(eventsel & HSW_IN_TX_CHECKPOINTED));
}

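/*
 * Program a fixed counter from its 4-bit field in IA32_FIXED_CTR_CTRL:
 * bit 0 enables counting in ring 0, bit 1 enables counting in rings
 * 1-3 and bit 3 requests a PMI on overflow. For example, a field of
 * 0xb counts in all rings and raises a PMI; 0x2 counts only user code
 * and stays silent.
 */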
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
	unsigned en = en_pmi & 0x3;
	bool pmi = en_pmi & 0x8;

	stop_counter(pmc);

	if (!en || !pmc_enabled(pmc))
		return;

	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			arch_events[fixed_pmc_events[idx]].event_type,
			!(en & 0x2), /* exclude user */
			!(en & 0x1), /* exclude kernel */
			pmi, false, false);
}

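/* Extract the 4-bit control field for fixed counter idx. */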
static inline u8 fixed_en_pmi(u64 ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

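/*
 * Handle a write to IA32_FIXED_CTR_CTRL: reprogram only the fixed
 * counters whose control fields actually changed.
 */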
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 en_pmi = fixed_en_pmi(data, i);
		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

		if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
			continue;

		reprogram_fixed_counter(pmc, en_pmi, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

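/*
 * Reprogram the counter at a global index from its current
 * architectural configuration, e.g. after its enable bit in
 * IA32_PERF_GLOBAL_CTRL changed.
 */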
static void reprogram_idx(struct kvm_pmu *pmu, int idx)
{
	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int fidx = idx - INTEL_PMC_IDX_FIXED;

		reprogram_fixed_counter(pmc,
				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
	}
}

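/*
 * Handle a write to IA32_PERF_GLOBAL_CTRL: only counters whose enable
 * bit toggled need to be reprogrammed.
 */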
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_idx(pmu, bit);
}

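/*
 * Return true if the MSR belongs to the PMU; the global control MSRs
 * exist only from architectural PMU version 2 on.
 */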
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
			|| get_fixed_pmc(pmu, msr);
		break;
	}
	return ret;
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			*data = read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}
	return 1;
}

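/*
 * Guest writes to a counter MSR are sign-extended from 32 bits;
 * host-initiated writes (e.g. state restore) set the full 64-bit
 * value.
 */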
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u32 index = msr_info->index;
	u64 data = msr_info->data;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull << 62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}
	return 1;
}

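/*
 * Emulate RDPMC: bit 30 of the index selects the fixed counters and
 * bit 31 requests "fast" mode, which truncates the result to 32 bits.
 */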
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool fast_mode = pmc & (1u << 31);
	bool fixed = pmc & (1u << 30);
	struct kvm_pmc *counters;
	u64 ctr;

	pmc &= ~(3u << 30);
	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
		return 1;
	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
		return 1;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	ctr = read_pmc(&counters[pmc]);
	if (fast_mode)
		ctr = (u32)ctr;
	*data = ctr;

	return 0;
}

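/*
 * Rebuild the PMU model from the guest's CPUID leaf 0xA: PMU version,
 * number and width of the general-purpose and fixed counters, and the
 * set of available architectural events. A missing leaf or a zero
 * version leaves the PMU disabled.
 */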
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_cpuid_entry2 *entry;
	unsigned bitmap_len;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;

	pmu->version = entry->eax & 0xff;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
			INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] =
		((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
	bitmap_len = (entry->eax >> 24) & 0xff;
	pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
				INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
	}

	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

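	/*
	 * Allow the HSW_IN_TX and HSW_IN_TX_CHECKPOINTED eventsel bits
	 * when both the host and the guest advertise TSX (HLE or RTM).
	 */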
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, trigger_pmi);
	kvm_pmu_cpuid_update(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	irq_work_sync(&pmu->irq_work);
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

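/*
 * Process pending KVM_REQ_PMU work: reprogram every counter whose
 * overflow handler flagged it in reprogram_pmi.
 */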
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_idx(pmu, bit);
	}
}