/*
 * QEMU ARM CP Register PMU insns
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "exec/icount.h"
#include "hw/irq.h"
#include "cpu.h"
#include "cpu-features.h"
#include "cpregs.h"
#include "internals.h"


#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, return the host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

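/*
 * Note: with ARM_CPU_FREQ fixed at 1 GHz, the muldiv64() above reduces to
 * an identity mapping, so one nanosecond of QEMU_CLOCK_VIRTUAL time
 * corresponds to exactly one emulated cycle (and cycles_ns_per() below is
 * likewise a multiply by one).
 */
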
#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    /* Precise instruction counting */
    return icount_enabled() == ICOUNT_PRECISE;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which never fire on QEMU, the count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmuv3p4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

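/*
 * For illustration of the mapping built above: PMCEID0 advertises events
 * 0x00..0x1f and PMCEID1 events 0x20..0x3f, one bit per event number. So
 * event 0x011 (CPU_CYCLES) sets bit 17 of PMCEID0, while event 0x023
 * (STALL_FRONTEND) sets bit 3 of PMCEID1 (0x23 & 0x1f == 3, and bit 0x20
 * selects pmceid1).
 */
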
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult do_pmreg_access(CPUARMState *env, bool is_pmcr)
{
    /*
     * Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM/TPMCR and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP_EL1;
    }
    if (el < 2) {
        uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

        if (mdcr_el2 & MDCR_TPM) {
            return CP_ACCESS_TRAP_EL2;
        }
        if (is_pmcr && (mdcr_el2 & MDCR_TPMCR)) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    return do_pmreg_access(env, false);
}

static CPAccessResult pmreg_access_pmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    return do_pmreg_access(env, true);
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2;
    uint8_t hpmn;

    /*
     * We might be called for M-profile cores where MDCR_EL2 doesn't
     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
     * must be before we read that value.
     */
    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    mdcr_el2 = arm_mdcr_el2_eff(env);
    hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

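/*
 * Filtering example: at non-secure EL0 the test above is "u != nsu", so
 * either PMEVTYPER.U=1/NSU=0 or U=0/NSU=1 stops the counter there, while
 * at secure EL0 only the U bit matters.
 */
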
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
                 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmccntr_clockdiv_enabled(CPUARMState *env)
{
    /*
     * Return true if the clock divider is enabled and the cycle counter
     * is supposed to tick only once every 64 clock cycles. This is
     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
     * (64-bit) cycle counter PMCR.D has no effect.
     */
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}

static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}

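/*
 * For example, on a FEAT_PMUv3p5 CPU with EL2, MDCR_EL2.HPMN = 4 and
 * MDCR_EL2.HLP = 1: counters 4 and up (those reserved to the hypervisor)
 * are 64-bit, while counters 0..3 follow PMCR.LP instead.
 */
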
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
            1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

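/*
 * Worked example of the delta bookkeeping: if the counter was enabled with
 * a guest-visible count of 0 when the underlying count was 1000 (so
 * c15_ccnt_delta == 1000), and the underlying count is now 1500, the
 * guest-visible PMCCNTR becomes 1500 - 1000 = 500. The overflow test above
 * fires when the old value had the top bit (31 or 63, per PMCR.LC) set and
 * the new value does not, i.e. the counter wrapped.
 */
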
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

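/*
 * Overflow scheduling example: with PMCR.LC clear and a guest-visible
 * PMCCNTR of 0xfffffff0, remaining_cycles is truncated to 0x10, and at the
 * fixed 1 GHz clock cycles_ns_per(0x10) is 16, so the PMU timer is set to
 * fire 16 ns of virtual time from now.
 */
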
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t pmcr = env->cp15.c9_pmcr;

    /*
     * If EL2 is implemented and enabled for the current security state, reads
     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
     */
    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
        pmcr &= ~PMCRN_MASK;
        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
    }

    return pmcr;
}

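/*
 * E.g. on a CPU with 8 counters but MDCR_EL2.HPMN = 4, a read of PMCR from
 * EL1 or EL0 reports N = 4: the hypervisor has reserved counters 4..7 for
 * itself and hidden them from the guest.
 */
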
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    uint64_t overflow_mask, new_pmswinc;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
            /* counter is enabled and not filtered */
            pmu_counter_enabled(env, i) &&
            /* counter is SW_INCR */
            (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            overflow_mask = pmevcntr_is_64_bit(env, i) ?
                1ULL << 63 : 1ULL << 31;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

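/*
 * Usage example: a guest that has programmed counters 0 and 1 with event
 * 0x000 (SW_INCR) and enabled them can write 0x3 to PMSWINC to bump both
 * by one; bits for counters that are disabled, filtered, or not configured
 * as SW_INCR are ignored.
 */
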
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow any value in [0..31] to be written to PMSELR
     * here; instead, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR
     * are accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
    pmu_op_finish(env);
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
    pmu_op_finish(env);
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

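/*
 * The counter index is recovered from the register encoding: PMEVTYPERn
 * and PMEVCNTRn are laid out with opc2 = n<2:0> and crm<1:0> = n<4:3>
 * (crm = 12..15 for the typer registers, 8..11 for the counters), so
 * ((crm & 3) << 3) | (opc2 & 7) yields n. See define_pm_cpregs() below.
 */
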
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
        value &= MAKE_64BIT_MASK(0, 32);
    }
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
            ret &= MAKE_64BIT_MASK(0, 32);
        }
        return ret;
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
         */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only bits for implemented counters (and the C bit) can be set */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static const ARMCPRegInfo v7_pm_reginfo[] = {
    /*
     * Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .writefn = pmcntenclr_write, .raw_writefn = raw_write,
      .type = ARM_CP_ALIAS | ARM_CP_IO },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write, .raw_writefn = raw_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .fgt = FGT_PMSWINC_EL0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .fgt = FGT_PMSWINC_EL0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fgt = FGT_PMSELR_EL0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fgt = FGT_PMSELR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .fgt = FGT_PMCCNTR_EL0,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCCFILTR_EL0,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCCFILTR_EL0,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .fgt = FGT_PMEVTYPERN_EL0,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .fgt = FGT_PMEVTYPERN_EL0,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .fgt = FGT_PMEVCNTRN_EL0,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .fgt = FGT_PMEVCNTRN_EL0,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, .raw_writefn = raw_write },
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
};

void define_pm_cpregs(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_V7)) {
        /*
         * v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        static const ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .fgt = FGT_PMCR_EL0,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access_pmcr,
            .readfn = pmcr_read, .raw_readfn = raw_read,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        const ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access_pmcr,
            .fgt = FGT_PMCR_EL0,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = cpu->isar.reset_pmcr_el0,
            .readfn = pmcr_read, .raw_readfn = raw_read,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };

        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        define_arm_cp_regs(cpu, v7_pm_reginfo);
        /*
         * 32-bit AArch32 PMCCNTR. We don't expose this to GDB if the
         * new-in-v8 PMUv3 64-bit AArch32 PMCCNTR register is implemented
         * (as that will provide the GDB user's view of "PMCCNTR").
         */
        ARMCPRegInfo pmccntr = {
            .name = "PMCCNTR",
            .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access_ccntr,
            .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
            .fgt = FGT_PMCCNTR_EL0,
            .readfn = pmccntr_read, .writefn = pmccntr_write32,
        };
        if (arm_feature(env, ARM_FEATURE_V8)) {
            pmccntr.type |= ARM_CP_NO_GDB;
        }
        define_one_arm_cp_reg(cpu, &pmccntr);

        for (unsigned i = 0, pmcrn = pmu_num_counters(env); i < pmcrn; i++) {
            g_autofree char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            g_autofree char *pmevcntr_el0_name =
                g_strdup_printf("PMEVCNTR%d_EL0", i);
            g_autofree char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            g_autofree char *pmevtyper_el0_name =
                g_strdup_printf("PMEVTYPER%d_EL0", i);

            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .fgt = FGT_PMEVCNTRN_EL0,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access_xevcntr },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW,
                  .accessfn = pmreg_access_xevcntr,
                  .type = ARM_CP_IO,
                  .fgt = FGT_PMEVCNTRN_EL0,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .fgt = FGT_PMEVTYPERN_EL0,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .fgt = FGT_PMEVTYPERN_EL0,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
            };
            define_arm_cp_regs(cpu, pmev_regs);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        const ARMCPRegInfo v8_pm_reginfo[] = {
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid1 },
            /* AArch32 64-bit PMCCNTR view: added in PMUv3 with Armv8 */
            { .name = "PMCCNTR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .crm = 9, .opc1 = 0,
              .access = PL0_RW, .accessfn = pmreg_access_ccntr,
              .resetvalue = 0,
              .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_64BIT,
              .fgt = FGT_PMCCNTR_EL0, .readfn = pmccntr_read,
              .writefn = pmccntr_write, },
        };
        define_arm_cp_regs(cpu, v8_pm_reginfo);
    }

    if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }

    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .fgt = FGT_PMMIR_EL1,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}
1347