xref: /openbmc/linux/arch/x86/kvm/pmu.c (revision e5af058a)
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and arch_events array */
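/* fixed counter 0 -> instructions, 1 -> cpu cycles, 2 -> ref cycles */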
static int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

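/*
 * Map a global counter index to its kvm_pmc.  Indices follow the
 * IA32_PERF_GLOBAL_CTRL bit layout: GP counters start at 0, fixed
 * counters at INTEL_PMC_IDX_FIXED (32).
 */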
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
	else
		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

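/*
 * Overflow handler used when the guest did not request a PMI on this
 * counter (ARCH_PERFMON_EVENTSEL_INT clear): record the overflow in
 * global_status and let kvm_pmu_handle_event() reprogram the counter.
 */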
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject a PMI.  If the vcpu was in guest mode when the NMI
		 * arrived, the PMI can be injected on the next guest-mode
		 * entry.  Otherwise we cannot be sure that the vcpu was not
		 * executing a hlt instruction at the time of the vmexit, in
		 * which case it will not re-enter guest mode until woken up.
		 * So we should wake it, but that is impossible from NMI
		 * context.  Do it from irq work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

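/*
 * Read the current counter value: the value saved at the last stop plus
 * whatever the backing host perf event has accumulated since, truncated
 * to the counter's architectural width.
 */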
static u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;

	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	/* FIXME: Scaling needed? */

	return counter & pmc_bitmask(pmc);
}

static void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

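/*
 * (Re)create the host perf event backing this guest counter.  The event
 * is pinned, counts only while the guest is running (exclude_host), and
 * its sample period is chosen so that the host event overflows exactly
 * when the guest counter would wrap.
 */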
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm_pmu: event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

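/*
 * Translate an eventsel/umask pair into a generic PERF_TYPE_HARDWARE id.
 * Events that the guest's CPUID 0x0A.EBX marks as unavailable are
 * skipped; returns PERF_COUNT_HW_MAX when there is no usable match.
 */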
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
		u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
		if (arch_events[i].eventsel == event_select
				&& arch_events[i].unit_mask == unit_mask
				&& (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(arch_events))
		return PERF_COUNT_HW_MAX;

	return arch_events[i].event_type;
}

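/*
 * IA32_PERFEVTSELx bit layout (per the Intel SDM):
 *   7:0   event select          15:8  unit mask (umask)
 *   16    USR (rings 1-3)       17    OS (ring 0)
 *   18    edge detect           19    pin control
 *   20    APIC interrupt (INT)  21    any thread
 *   22    enable (EN)           23    invert cmask (INV)
 *   31:24 counter mask (cmask)
 */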
static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = find_arch_event(pmc_to_pmu(pmc), event_select,
				unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}

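/*
 * ctrl is the 4-bit IA32_FIXED_CTR_CTRL field for one fixed counter:
 * bit 0 enables counting in ring 0, bit 1 in rings 1-3, and bit 3
 * requests a PMI on overflow (bit 2, any-thread, is not supported).
 */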
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      arch_events[fixed_pmc_events[idx]].event_type,
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}

static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		u8 new_ctrl = fixed_ctrl_field(data, i);
		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

		if (old_ctrl == new_ctrl)
			continue;

		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}

static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

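/*
 * Called in vcpu context in response to KVM_REQ_PMU: reprogram every
 * counter whose reprogram_pmi bit was set from NMI context by an
 * overflow handler.
 */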
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/*
 * Check if idx is a valid index to access the PMU.  Note the inverted
 * return convention: 0 means idx is valid, non-zero means it is not.
 */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);
	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

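/*
 * Emulate RDPMC.  The guest's ECX value encodes the request: bit 30
 * selects the fixed-counter set, bit 31 asks for a fast (low 32 bits
 * only) read, and the remaining bits index into the chosen array.
 */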
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fast_mode = idx & (1u << 31);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	u64 ctr_val;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return 1;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return 1;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;

	ctr_val = pmc_read_counter(&counters[idx]);
	if (fast_mode)
		ctr_val = (u32)ctr_val;

	*data = ctr_val;
	return 0;
}

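/* Deliver the PMI to the guest through the local APIC's LVTPC entry. */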
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
			|| get_fixed_pmc(pmu, msr);
		break;
	}
	return ret;
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			*data = pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}
	return 1;
}

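/*
 * Handle a guest (or host-initiated) write to a PMU MSR.  Returns 0 if
 * the write was accepted and non-zero if it is illegal, in which case
 * the caller fails the write (typically injecting #GP).  Writes that
 * set reserved bits are rejected; e.g. the 0xfffffffffffff444 mask
 * below leaves only the OS/USR enable bits and the PMI bit writable in
 * each fixed-counter control nibble.
 */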
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 index = msr_info->index;
	u64 data = msr_info->data;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
				(pmc = get_fixed_pmc(pmu, index))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}
	return 1;
}

/*
 * Refresh the PMU configuration.  This is called whenever the
 * underlying settings change (for example when the guest's PMU CPUID
 * leaf is updated), which should happen rarely.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

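	/* Fixed counters only exist from architectural PMU version 2 on. */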
	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
				INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

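/*
 * Reset the PMU: release every host perf event and clear the PMU MSR
 * state.  irq_work_sync() ensures a pending PMI irq_work cannot race
 * with the teardown.
 */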
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	irq_work_sync(&pmu->irq_work);
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		pmc_stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

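/*
 * One-time vcpu PMU setup.  Each counter's global index mirrors the
 * IA32_PERF_GLOBAL_CTRL bit layout: GP counters at 0..N-1 and fixed
 * counters starting at INTEL_PMC_IDX_FIXED.
 */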
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}