1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
29bd46da4SMax Filippov /*
39bd46da4SMax Filippov * Xtensa Performance Monitor Module driver
49bd46da4SMax Filippov * See Tensilica Debug User's Guide for PMU registers documentation.
59bd46da4SMax Filippov *
69bd46da4SMax Filippov * Copyright (C) 2015 Cadence Design Systems Inc.
79bd46da4SMax Filippov */
89bd46da4SMax Filippov
99bd46da4SMax Filippov #include <linux/interrupt.h>
109bd46da4SMax Filippov #include <linux/irqdomain.h>
119bd46da4SMax Filippov #include <linux/module.h>
129bd46da4SMax Filippov #include <linux/of.h>
139bd46da4SMax Filippov #include <linux/perf_event.h>
149bd46da4SMax Filippov #include <linux/platform_device.h>
159bd46da4SMax Filippov
16*687eb3c4SMax Filippov #include <asm/core.h>
179bd46da4SMax Filippov #include <asm/processor.h>
189bd46da4SMax Filippov #include <asm/stacktrace.h>
199bd46da4SMax Filippov
#define XTENSA_HWVERSION_RG_2015_0	260000

/*
 * Cores with HW version >= RG-2015.0 expose the PMU at a different
 * ERI base address.
 */
#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
#define XTENSA_PMU_ERI_BASE		0x00101000
#else
#define XTENSA_PMU_ERI_BASE		0x00001000
#endif

/* Global control/status for all perf counters */
#define XTENSA_PMU_PMG			XTENSA_PMU_ERI_BASE
/* Perf counter values */
#define XTENSA_PMU_PM(i)		(XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
/* Perf counter control registers */
#define XTENSA_PMU_PMCTRL(i)		(XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4)
/* Perf counter status registers */
#define XTENSA_PMU_PMSTAT(i)		(XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4)

/* PMG: global enable bit for all counters */
#define XTENSA_PMU_PMG_PMEN		0x1

/* Counters are 32 bits wide; MAX is the largest programmable period */
#define XTENSA_PMU_COUNTER_MASK		0xffffffffULL
#define XTENSA_PMU_COUNTER_MAX		0x7fffffff

/* PMCTRL fields: overflow irq enable, kernel-only counting,
 * trace-level filter, event group select and event mask within group.
 */
#define XTENSA_PMU_PMCTRL_INTEN		0x00000001
#define XTENSA_PMU_PMCTRL_KRNLCNT	0x00000008
#define XTENSA_PMU_PMCTRL_TRACELEVEL	0x000000f0
#define XTENSA_PMU_PMCTRL_SELECT_SHIFT	8
#define XTENSA_PMU_PMCTRL_SELECT	0x00001f00
#define XTENSA_PMU_PMCTRL_MASK_SHIFT	16
#define XTENSA_PMU_PMCTRL_MASK		0xffff0000

/*
 * Build a PMCTRL value: event group 'select', event 'mask' within that
 * group, all trace levels counted, interrupt on overflow.
 */
#define XTENSA_PMU_MASK(select, mask) \
	(((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \
	 ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \
	 XTENSA_PMU_PMCTRL_TRACELEVEL | \
	 XTENSA_PMU_PMCTRL_INTEN)

/* PMSTAT bits: counter overflowed / interrupt asserted */
#define XTENSA_PMU_PMSTAT_OVFL		0x00000001
#define XTENSA_PMU_PMSTAT_INTASRT	0x00000010
589bd46da4SMax Filippov
/* Per-CPU bookkeeping for the hardware counters owned by this core */
struct xtensa_pmu_events {
	/* Array of events currently on this core, indexed by counter */
	struct perf_event *event[XCHAL_NUM_PERF_COUNTERS];
	/* Bitmap of used hardware counters */
	unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)];
};
static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events);
669bd46da4SMax Filippov
/*
 * PERF_TYPE_HARDWARE event -> PMCTRL value map.
 * A zero entry means the generic event is not supported.
 */
static const u32 xtensa_hw_ctl[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= XTENSA_PMU_MASK(0, 0x1),
	[PERF_COUNT_HW_INSTRUCTIONS]		= XTENSA_PMU_MASK(2, 0xffff),
	[PERF_COUNT_HW_CACHE_REFERENCES]	= XTENSA_PMU_MASK(10, 0x1),
	[PERF_COUNT_HW_CACHE_MISSES]		= XTENSA_PMU_MASK(12, 0x1),
	/* Taken and non-taken branches + taken loop ends */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XTENSA_PMU_MASK(2, 0x490),
	/* Instruction-related + other global stall cycles */
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XTENSA_PMU_MASK(4, 0x1ff),
	/* Data-related global stall cycles */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= XTENSA_PMU_MASK(3, 0x1ff),
};
799bd46da4SMax Filippov
#define C(_x) PERF_COUNT_HW_CACHE_##_x

/*
 * PERF_TYPE_HW_CACHE (type, op, result) -> PMCTRL value map.
 * A zero entry means the combination is not supported.
 */
static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(10, 0x1),
			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(10, 0x2),
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(11, 0x1),
			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(11, 0x2),
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(8, 0x1),
			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(8, 0x2),
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(9, 0x1),
			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(9, 0x8),
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(7, 0x1),
			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(7, 0x8),
		},
	},
};
1129bd46da4SMax Filippov
/*
 * Decode a PERF_TYPE_HW_CACHE config word (type | op << 8 | result << 16)
 * into a PMCTRL value via xtensa_cache_ctl[].
 * Returns the PMCTRL value, or -EINVAL for an unknown or unsupported
 * combination (zero table entries mark unsupported events).
 */
static int xtensa_pmu_cache_event(u64 config)
{
	unsigned int type = config & 0xff;
	unsigned int op = (config >> 8) & 0xff;
	unsigned int result = (config >> 16) & 0xff;
	int ctl;

	if (type >= ARRAY_SIZE(xtensa_cache_ctl) ||
	    op >= C(OP_MAX) ||
	    result >= C(RESULT_MAX))
		return -EINVAL;

	ctl = xtensa_cache_ctl[type][op][result];
	return ctl ? ctl : -EINVAL;
}
1349bd46da4SMax Filippov
/* Read the current raw value of hardware counter @idx. */
static inline uint32_t xtensa_pmu_read_counter(int idx)
{
	return get_er(XTENSA_PMU_PM(idx));
}
1399bd46da4SMax Filippov
/* Write raw value @v into hardware counter @idx. */
static inline void xtensa_pmu_write_counter(int idx, uint32_t v)
{
	set_er(v, XTENSA_PMU_PM(idx));
}
1449bd46da4SMax Filippov
xtensa_perf_event_update(struct perf_event * event,struct hw_perf_event * hwc,int idx)1459bd46da4SMax Filippov static void xtensa_perf_event_update(struct perf_event *event,
1469bd46da4SMax Filippov struct hw_perf_event *hwc, int idx)
1479bd46da4SMax Filippov {
1489bd46da4SMax Filippov uint64_t prev_raw_count, new_raw_count;
1499bd46da4SMax Filippov int64_t delta;
1509bd46da4SMax Filippov
1519bd46da4SMax Filippov do {
1529bd46da4SMax Filippov prev_raw_count = local64_read(&hwc->prev_count);
1539bd46da4SMax Filippov new_raw_count = xtensa_pmu_read_counter(event->hw.idx);
1549bd46da4SMax Filippov } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
1559bd46da4SMax Filippov new_raw_count) != prev_raw_count);
1569bd46da4SMax Filippov
1579bd46da4SMax Filippov delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK;
1589bd46da4SMax Filippov
1599bd46da4SMax Filippov local64_add(delta, &event->count);
1609bd46da4SMax Filippov local64_sub(delta, &hwc->period_left);
1619bd46da4SMax Filippov }
1629bd46da4SMax Filippov
/*
 * Program the next period into counter @idx.  The counter is written
 * with -left so that it overflows (raising the PMU interrupt) after
 * 'left' more events.  Returns true when a new sampling period was
 * started, i.e. the overflow should produce a sample.
 */
static bool xtensa_perf_event_set_period(struct perf_event *event,
					 struct hw_perf_event *hwc, int idx)
{
	bool rc = false;
	s64 left;

	if (!is_sampling_event(event)) {
		/* Counting-only event: just use the widest period */
		left = XTENSA_PMU_COUNTER_MAX;
	} else {
		s64 period = hwc->sample_period;

		left = local64_read(&hwc->period_left);
		if (left <= -period) {
			/* Far overdue: restart with a full period */
			left = period;
			local64_set(&hwc->period_left, left);
			hwc->last_period = period;
			rc = true;
		} else if (left <= 0) {
			/* Period expired: carry the overshoot forward */
			left += period;
			local64_set(&hwc->period_left, left);
			hwc->last_period = period;
			rc = true;
		}
		/* Clamp to what the 32-bit hardware counter can hold */
		if (left > XTENSA_PMU_COUNTER_MAX)
			left = XTENSA_PMU_COUNTER_MAX;
	}

	local64_set(&hwc->prev_count, -left);
	xtensa_pmu_write_counter(idx, -left);
	perf_event_update_userpage(event);

	return rc;
}
1969bd46da4SMax Filippov
xtensa_pmu_enable(struct pmu * pmu)1979bd46da4SMax Filippov static void xtensa_pmu_enable(struct pmu *pmu)
1989bd46da4SMax Filippov {
1999bd46da4SMax Filippov set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
2009bd46da4SMax Filippov }
2019bd46da4SMax Filippov
xtensa_pmu_disable(struct pmu * pmu)2029bd46da4SMax Filippov static void xtensa_pmu_disable(struct pmu *pmu)
2039bd46da4SMax Filippov {
2049bd46da4SMax Filippov set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
2059bd46da4SMax Filippov }
2069bd46da4SMax Filippov
xtensa_pmu_event_init(struct perf_event * event)2079bd46da4SMax Filippov static int xtensa_pmu_event_init(struct perf_event *event)
2089bd46da4SMax Filippov {
2099bd46da4SMax Filippov int ret;
2109bd46da4SMax Filippov
2119bd46da4SMax Filippov switch (event->attr.type) {
2129bd46da4SMax Filippov case PERF_TYPE_HARDWARE:
2139bd46da4SMax Filippov if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) ||
2149bd46da4SMax Filippov xtensa_hw_ctl[event->attr.config] == 0)
2159bd46da4SMax Filippov return -EINVAL;
2169bd46da4SMax Filippov event->hw.config = xtensa_hw_ctl[event->attr.config];
2179bd46da4SMax Filippov return 0;
2189bd46da4SMax Filippov
2199bd46da4SMax Filippov case PERF_TYPE_HW_CACHE:
2209bd46da4SMax Filippov ret = xtensa_pmu_cache_event(event->attr.config);
2219bd46da4SMax Filippov if (ret < 0)
2229bd46da4SMax Filippov return ret;
2239bd46da4SMax Filippov event->hw.config = ret;
2249bd46da4SMax Filippov return 0;
2259bd46da4SMax Filippov
2269bd46da4SMax Filippov case PERF_TYPE_RAW:
2279bd46da4SMax Filippov /* Not 'previous counter' select */
2289bd46da4SMax Filippov if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) ==
2299bd46da4SMax Filippov (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT))
2309bd46da4SMax Filippov return -EINVAL;
2319bd46da4SMax Filippov event->hw.config = (event->attr.config &
2329bd46da4SMax Filippov (XTENSA_PMU_PMCTRL_KRNLCNT |
2339bd46da4SMax Filippov XTENSA_PMU_PMCTRL_TRACELEVEL |
2349bd46da4SMax Filippov XTENSA_PMU_PMCTRL_SELECT |
2359bd46da4SMax Filippov XTENSA_PMU_PMCTRL_MASK)) |
2369bd46da4SMax Filippov XTENSA_PMU_PMCTRL_INTEN;
2379bd46da4SMax Filippov return 0;
2389bd46da4SMax Filippov
2399bd46da4SMax Filippov default:
2409bd46da4SMax Filippov return -ENOENT;
2419bd46da4SMax Filippov }
2429bd46da4SMax Filippov }
2439bd46da4SMax Filippov
2449bd46da4SMax Filippov /*
2459bd46da4SMax Filippov * Starts/Stops a counter present on the PMU. The PMI handler
2469bd46da4SMax Filippov * should stop the counter when perf_event_overflow() returns
2479bd46da4SMax Filippov * !0. ->start() will be used to continue.
2489bd46da4SMax Filippov */
xtensa_pmu_start(struct perf_event * event,int flags)2499bd46da4SMax Filippov static void xtensa_pmu_start(struct perf_event *event, int flags)
2509bd46da4SMax Filippov {
2519bd46da4SMax Filippov struct hw_perf_event *hwc = &event->hw;
2529bd46da4SMax Filippov int idx = hwc->idx;
2539bd46da4SMax Filippov
2549bd46da4SMax Filippov if (WARN_ON_ONCE(idx == -1))
2559bd46da4SMax Filippov return;
2569bd46da4SMax Filippov
2579bd46da4SMax Filippov if (flags & PERF_EF_RELOAD) {
2589bd46da4SMax Filippov WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
2599bd46da4SMax Filippov xtensa_perf_event_set_period(event, hwc, idx);
2609bd46da4SMax Filippov }
2619bd46da4SMax Filippov
2629bd46da4SMax Filippov hwc->state = 0;
2639bd46da4SMax Filippov
2649bd46da4SMax Filippov set_er(hwc->config, XTENSA_PMU_PMCTRL(idx));
2659bd46da4SMax Filippov }
2669bd46da4SMax Filippov
xtensa_pmu_stop(struct perf_event * event,int flags)2679bd46da4SMax Filippov static void xtensa_pmu_stop(struct perf_event *event, int flags)
2689bd46da4SMax Filippov {
2699bd46da4SMax Filippov struct hw_perf_event *hwc = &event->hw;
2709bd46da4SMax Filippov int idx = hwc->idx;
2719bd46da4SMax Filippov
2729bd46da4SMax Filippov if (!(hwc->state & PERF_HES_STOPPED)) {
2739bd46da4SMax Filippov set_er(0, XTENSA_PMU_PMCTRL(idx));
2749bd46da4SMax Filippov set_er(get_er(XTENSA_PMU_PMSTAT(idx)),
2759bd46da4SMax Filippov XTENSA_PMU_PMSTAT(idx));
2769bd46da4SMax Filippov hwc->state |= PERF_HES_STOPPED;
2779bd46da4SMax Filippov }
2789bd46da4SMax Filippov
2799bd46da4SMax Filippov if ((flags & PERF_EF_UPDATE) &&
2809bd46da4SMax Filippov !(event->hw.state & PERF_HES_UPTODATE)) {
2819bd46da4SMax Filippov xtensa_perf_event_update(event, &event->hw, idx);
2829bd46da4SMax Filippov event->hw.state |= PERF_HES_UPTODATE;
2839bd46da4SMax Filippov }
2849bd46da4SMax Filippov }
2859bd46da4SMax Filippov
2869bd46da4SMax Filippov /*
2879bd46da4SMax Filippov * Adds/Removes a counter to/from the PMU, can be done inside
2889bd46da4SMax Filippov * a transaction, see the ->*_txn() methods.
2899bd46da4SMax Filippov */
xtensa_pmu_add(struct perf_event * event,int flags)2909bd46da4SMax Filippov static int xtensa_pmu_add(struct perf_event *event, int flags)
2919bd46da4SMax Filippov {
2929bd46da4SMax Filippov struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
2939bd46da4SMax Filippov struct hw_perf_event *hwc = &event->hw;
2949bd46da4SMax Filippov int idx = hwc->idx;
2959bd46da4SMax Filippov
2969bd46da4SMax Filippov if (__test_and_set_bit(idx, ev->used_mask)) {
2979bd46da4SMax Filippov idx = find_first_zero_bit(ev->used_mask,
2989bd46da4SMax Filippov XCHAL_NUM_PERF_COUNTERS);
2999bd46da4SMax Filippov if (idx == XCHAL_NUM_PERF_COUNTERS)
3009bd46da4SMax Filippov return -EAGAIN;
3019bd46da4SMax Filippov
3029bd46da4SMax Filippov __set_bit(idx, ev->used_mask);
3039bd46da4SMax Filippov hwc->idx = idx;
3049bd46da4SMax Filippov }
3059bd46da4SMax Filippov ev->event[idx] = event;
3069bd46da4SMax Filippov
3079bd46da4SMax Filippov hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
3089bd46da4SMax Filippov
3099bd46da4SMax Filippov if (flags & PERF_EF_START)
3109bd46da4SMax Filippov xtensa_pmu_start(event, PERF_EF_RELOAD);
3119bd46da4SMax Filippov
3129bd46da4SMax Filippov perf_event_update_userpage(event);
3139bd46da4SMax Filippov return 0;
3149bd46da4SMax Filippov }
3159bd46da4SMax Filippov
xtensa_pmu_del(struct perf_event * event,int flags)3169bd46da4SMax Filippov static void xtensa_pmu_del(struct perf_event *event, int flags)
3179bd46da4SMax Filippov {
3189bd46da4SMax Filippov struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
3199bd46da4SMax Filippov
3209bd46da4SMax Filippov xtensa_pmu_stop(event, PERF_EF_UPDATE);
3219bd46da4SMax Filippov __clear_bit(event->hw.idx, ev->used_mask);
3229bd46da4SMax Filippov perf_event_update_userpage(event);
3239bd46da4SMax Filippov }
3249bd46da4SMax Filippov
/* pmu::read: sync the hardware counter into event->count. */
static void xtensa_pmu_read(struct perf_event *event)
{
	xtensa_perf_event_update(event, &event->hw, event->hw.idx);
}
3299bd46da4SMax Filippov
/*
 * Backtrace callback: record one frame's PC into the callchain.
 * Always returns 0 so the walk continues until the backtracer stops.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry_ctx *entry = data;

	perf_callchain_store(entry, frame->pc);
	return 0;
}
3379bd46da4SMax Filippov
/*
 * Collect a kernel-mode callchain by walking the kernel stack,
 * storing at most entry->max_stack frames via callchain_trace().
 */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	xtensa_backtrace_kernel(regs, entry->max_stack,
				callchain_trace, NULL, entry);
}
3449bd46da4SMax Filippov
/*
 * Collect a user-mode callchain by walking the user stack,
 * storing at most entry->max_stack frames via callchain_trace().
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	xtensa_backtrace_user(regs, entry->max_stack,
			      callchain_trace, entry);
}
3519bd46da4SMax Filippov
perf_event_print_debug(void)3529bd46da4SMax Filippov void perf_event_print_debug(void)
3539bd46da4SMax Filippov {
3549bd46da4SMax Filippov unsigned long flags;
3559bd46da4SMax Filippov unsigned i;
3569bd46da4SMax Filippov
3579bd46da4SMax Filippov local_irq_save(flags);
3589bd46da4SMax Filippov pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(),
3599bd46da4SMax Filippov get_er(XTENSA_PMU_PMG));
3609bd46da4SMax Filippov for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i)
3619bd46da4SMax Filippov pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n",
3629bd46da4SMax Filippov i, get_er(XTENSA_PMU_PM(i)),
3639bd46da4SMax Filippov i, get_er(XTENSA_PMU_PMCTRL(i)),
3649bd46da4SMax Filippov i, get_er(XTENSA_PMU_PMSTAT(i)));
3659bd46da4SMax Filippov local_irq_restore(flags);
3669bd46da4SMax Filippov }
3679bd46da4SMax Filippov
/*
 * PMU overflow interrupt.  For every active counter whose overflow bit
 * is set: acknowledge the overflow, fold the counted value into the
 * event, and rearm the counter for the next period, emitting a sample
 * when one is due.
 */
irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
{
	irqreturn_t rc = IRQ_NONE;
	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
	unsigned i;

	for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
		uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
		struct perf_event *event = ev->event[i];
		struct hw_perf_event *hwc = &event->hw;
		u64 last_period;

		if (!(v & XTENSA_PMU_PMSTAT_OVFL))
			continue;

		/* Write the status value back to clear the overflow bit */
		set_er(v, XTENSA_PMU_PMSTAT(i));
		xtensa_perf_event_update(event, hwc, i);
		last_period = hwc->last_period;
		/* set_period() returns true when a sample is due */
		if (xtensa_perf_event_set_period(event, hwc, i)) {
			struct perf_sample_data data;
			struct pt_regs *regs = get_irq_regs();

			perf_sample_data_init(&data, 0, last_period);
			/* Non-zero means the event got throttled: stop it */
			if (perf_event_overflow(event, &data, regs))
				xtensa_pmu_stop(event, 0);
		}

		rc = IRQ_HANDLED;
	}
	return rc;
}
3999bd46da4SMax Filippov
/* PMU callbacks wired into the core perf subsystem */
static struct pmu xtensa_pmu = {
	.pmu_enable = xtensa_pmu_enable,
	.pmu_disable = xtensa_pmu_disable,
	.event_init = xtensa_pmu_event_init,
	.add = xtensa_pmu_add,
	.del = xtensa_pmu_del,
	.start = xtensa_pmu_start,
	.stop = xtensa_pmu_stop,
	.read = xtensa_pmu_read,
};
4109bd46da4SMax Filippov
xtensa_pmu_setup(unsigned int cpu)4116d65d376SMax Filippov static int xtensa_pmu_setup(unsigned int cpu)
4129bd46da4SMax Filippov {
4139bd46da4SMax Filippov unsigned i;
4149bd46da4SMax Filippov
4159bd46da4SMax Filippov set_er(0, XTENSA_PMU_PMG);
4169bd46da4SMax Filippov for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) {
4179bd46da4SMax Filippov set_er(0, XTENSA_PMU_PMCTRL(i));
4189bd46da4SMax Filippov set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
4199bd46da4SMax Filippov }
42025a77b55SSebastian Andrzej Siewior return 0;
4219bd46da4SMax Filippov }
4229bd46da4SMax Filippov
xtensa_pmu_init(void)4239bd46da4SMax Filippov static int __init xtensa_pmu_init(void)
4249bd46da4SMax Filippov {
4259bd46da4SMax Filippov int ret;
4269bd46da4SMax Filippov int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
4279bd46da4SMax Filippov
42825a77b55SSebastian Andrzej Siewior ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
42973c1b41eSThomas Gleixner "perf/xtensa:starting", xtensa_pmu_setup,
43025a77b55SSebastian Andrzej Siewior NULL);
43125a77b55SSebastian Andrzej Siewior if (ret) {
43225a77b55SSebastian Andrzej Siewior pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
43325a77b55SSebastian Andrzej Siewior return ret;
43425a77b55SSebastian Andrzej Siewior }
43538fef73cSMax Filippov #if XTENSA_FAKE_NMI
43638fef73cSMax Filippov enable_irq(irq);
43738fef73cSMax Filippov #else
4389bd46da4SMax Filippov ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
4399bd46da4SMax Filippov "pmu", NULL);
4409bd46da4SMax Filippov if (ret < 0)
4419bd46da4SMax Filippov return ret;
44238fef73cSMax Filippov #endif
4439bd46da4SMax Filippov
4449bd46da4SMax Filippov ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
4459bd46da4SMax Filippov if (ret)
4469bd46da4SMax Filippov free_irq(irq, NULL);
4479bd46da4SMax Filippov
4489bd46da4SMax Filippov return ret;
4499bd46da4SMax Filippov }
4509bd46da4SMax Filippov early_initcall(xtensa_pmu_init);
451