/*
 * QEMU ARM CP Register PMU insns
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "exec/icount.h"
#include "hw/irq.h"
#include "cpu.h"
#include "cpu-features.h"
#include "cpregs.h"
#include "internals.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value of this function.
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
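
/*
 * Worked example of the delta scheme (illustrative numbers only): if
 * get_count() returns 1000 at the moment the guest programs a counter
 * to 5, we record delta = 995. When get_count() later returns 1200,
 * the guest-visible count is 1200 - 995 = 205, i.e. the programmed 5
 * plus the 200 events that occurred in between.
 */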

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters.
 * In system emulation this is derived from the virtual clock; in
 * usermode emulation there is no virtual clock, so we fall back to
 * the host's tick counter.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    /*
     * With ARM_CPU_FREQ fixed at 1 GHz this ratio is exactly 1; the
     * integer division only stays exact while the frequency is a
     * multiple of NANOSECONDS_PER_SECOND.
     */
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    /* Precise instruction counting */
    return icount_enabled() == ICOUNT_PRECISE;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which never fire on QEMU, the count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmuv3p4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};
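
/*
 * Sketch of what a new always-zero entry would look like (the event
 * number and feature gate here are purely illustrative, not a claim
 * that such an event exists or is supported):
 *
 *   { .number = 0x03d,
 *     .supported = pmuv3p4_events_supported,
 *     .get_count = zero_event_get_count,
 *     .ns_per_count = zero_event_ns_per,
 *   },
 *
 * Any number above MAX_EVENT_ID would also require raising that define.
 */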

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
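
/*
 * Worked example of the PMCEID mapping above: CPU_CYCLES (0x011) has
 * bit 5 clear, so it sets bit 17 (0x11 & 0x1f) of PMCEID0, while
 * STALL_FRONTEND (0x023) has bit 5 set, so it sets bit 3 (0x23 & 0x1f)
 * of PMCEID1.
 */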

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * User accessibility of the performance monitor registers is
     * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
     * configurable trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP_EL1;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
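
/*
 * Summary of the PMUSERENR bits consulted by the access functions above:
 *  bit 0 (EN) - EL0 access to the PMU registers in general
 *  bit 1 (SW) - EL0 writes to PMSWINC
 *  bit 2 (CR) - EL0 reads of PMCCNTR
 *  bit 3 (ER) - EL0 reads of the event counters, and PMSELR access
 */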

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2;
    uint8_t hpmn;

    /*
     * We might be called for M-profile cores where MDCR_EL2 doesn't
     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
     * must be before we read that value.
     */
    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    mdcr_el2 = arm_mdcr_el2_eff(env);
    hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && (env->cp15.c9_pmcr & PMCRDP);
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up to an event
         * we support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
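
/*
 * Example of the EL0 filter logic above: with U=1 and NSU=0 the counter
 * is filtered at EL0 in both security states (secure state uses U
 * directly; non-secure state uses u != nsu), whereas U=1, NSU=1 filters
 * secure EL0 but counts at non-secure EL0, since NSU inverts the U
 * setting for non-secure state.
 */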

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmccntr_clockdiv_enabled(CPUARMState *env)
{
    /*
     * Return true if the clock divider is enabled and the cycle counter
     * is supposed to tick only once every 64 clock cycles. This is
     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
     * (64-bit) cycle counter PMCR.D has no effect.
     */
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}
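
/*
 * The mask comparison above is a compact two-bit truth table: with D=0
 * the masked value can never equal PMCRD; with D=1 and LC=1 it equals
 * PMCRD | PMCRLC; only D=1, LC=0 leaves exactly PMCRD and enables the
 * divide-by-64 behaviour.
 */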

static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}
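
/*
 * Illustrative partitioning (values are examples, not defaults): with
 * MDCR_EL2.HPMN = 4 on a CPU implementing 8 event counters, counters
 * 4..7 take their width from MDCR_EL2.HLP while counters 0..3 follow
 * PMCR.LP.
 */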

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
            1ULL << 63 : 1ULL << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
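
/*
 * The "old & ~new & overflow_mask" test above (also used in the
 * pmevcntr paths below) detects a wrap: the overflow bit was set in the
 * old value and is clear in the new one, which only happens when the
 * count wraps past the top of its 32- or 64-bit range.
 */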

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
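
/*
 * Worked example of the overflow scheduling above (illustrative
 * values): with PMCR.LC clear and c15_ccnt = 0xfffffff0,
 * remaining_cycles = (uint32_t)-0xfffffff0 = 16, so the timer is set
 * 16 cycle-periods ahead, when bit 31 of the 32-bit view next flips.
 */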

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t pmcr = env->cp15.c9_pmcr;

    /*
     * If EL2 is implemented and enabled for the current security state, reads
     * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or
     * HDCR.HPMN.
     */
    if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
        pmcr &= ~PMCRN_MASK;
        pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
    }

    return pmcr;
}
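
/*
 * For example (illustrative values): on a CPU implementing eight event
 * counters, setting MDCR_EL2.HPMN = 4 makes EL0/EL1 reads of PMCR.N
 * report 4, reserving counters 4..7 for the hypervisor's use.
 */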

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    uint64_t overflow_mask, new_pmswinc;

    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            overflow_mask = pmevcntr_is_64_bit(env, i) ?
                1ULL << 63 : 1ULL << 31;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /*
     * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
     * stored PMSELR.SEL is then checked when PMXEVTYPER and PMXEVCNTR
     * are accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
    pmu_op_finish(env);
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
    pmu_op_finish(env);
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
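
/*
 * Worked example of the crm/opc2 decode above: PMEVTYPER11 is encoded
 * with crm = 12 | (11 >> 3) = 13 and opc2 = 11 & 7 = 3, so
 * ((crm & 3) << 3) | (opc2 & 7) = (1 << 3) | 3 recovers counter 11.
 */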

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
        value &= MAKE_64BIT_MASK(0, 32);
    }
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
            ret &= MAKE_64BIT_MASK(0, 32);
        }
        return ret;
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
         */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only bits for implemented counters (and the C bit) can be set */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static const ARMCPRegInfo v7_pm_reginfo[] = {
    /*
     * Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (i.e. UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .writefn = pmcntenclr_write, .raw_writefn = raw_write,
      .type = ARM_CP_ALIAS | ARM_CP_IO },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCNTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write, .raw_writefn = raw_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .fgt = FGT_PMSWINC_EL0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .fgt = FGT_PMSWINC_EL0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fgt = FGT_PMSELR_EL0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fgt = FGT_PMSELR_EL0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .fgt = FGT_PMCCNTR_EL0,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCCFILTR_EL0,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMCCFILTR_EL0,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .fgt = FGT_PMEVTYPERN_EL0,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .fgt = FGT_PMEVTYPERN_EL0,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .fgt = FGT_PMEVCNTRN_EL0,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .fgt = FGT_PMEVCNTRN_EL0,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .fgt = FGT_PMINTEN,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, .raw_writefn = raw_write },
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fgt = FGT_PMOVS,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
};

void define_pm_cpregs(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_V7)) {
        /*
         * v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        static const ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0,
            .opc2 = 0,
            .access = PL0_RW,
            .fgt = FGT_PMCR_EL0,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access,
            .readfn = pmcr_read, .raw_readfn = raw_read,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        const ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .fgt = FGT_PMCR_EL0,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = cpu->isar.reset_pmcr_el0,
            .readfn = pmcr_read, .raw_readfn = raw_read,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };

        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        define_arm_cp_regs(cpu, v7_pm_reginfo);
        /*
         * 32-bit AArch32 PMCCNTR. We don't expose this to GDB if the
         * new-in-v8 PMUv3 64-bit AArch32 PMCCNTR register is implemented
         * (as that will provide the GDB user's view of "PMCCNTR").
         */
        ARMCPRegInfo pmccntr = {
            .name = "PMCCNTR",
            .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access_ccntr,
            .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
            .fgt = FGT_PMCCNTR_EL0,
            .readfn = pmccntr_read, .writefn = pmccntr_write32,
        };
        if (arm_feature(env, ARM_FEATURE_V8)) {
            pmccntr.type |= ARM_CP_NO_GDB;
        }
        define_one_arm_cp_reg(cpu, &pmccntr);

        for (unsigned i = 0, pmcrn = pmu_num_counters(env); i < pmcrn; i++) {
            g_autofree char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            g_autofree char *pmevcntr_el0_name =
                g_strdup_printf("PMEVCNTR%d_EL0", i);
            g_autofree char *pmevtyper_name =
                g_strdup_printf("PMEVTYPER%d", i);
            g_autofree char *pmevtyper_el0_name =
                g_strdup_printf("PMEVTYPER%d_EL0", i);

            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .fgt = FGT_PMEVCNTRN_EL0,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access_xevcntr },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW,
                  .accessfn = pmreg_access_xevcntr,
                  .type = ARM_CP_IO,
                  .fgt = FGT_PMEVCNTRN_EL0,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .fgt = FGT_PMEVTYPERN_EL0,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .fgt = FGT_PMEVTYPERN_EL0,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
            };
            define_arm_cp_regs(cpu, pmev_regs);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        const ARMCPRegInfo v8_pm_reginfo[] = {
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = cpu->pmceid1 },
            /* AArch32 64-bit PMCCNTR view: added in PMUv3 with Armv8 */
            { .name = "PMCCNTR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .crm = 9, .opc1 = 0,
              .access = PL0_RW, .accessfn = pmreg_access_ccntr,
              .resetvalue = 0,
              .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_64BIT,
              .fgt = FGT_PMCCNTR_EL0, .readfn = pmccntr_read,
              .writefn = pmccntr_write, },
        };
        define_arm_cp_regs(cpu, v8_pm_reginfo);
    }

    if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .fgt = FGT_PMCEIDN_EL0,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }

    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .fgt = FGT_PMMIR_EL1,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}