// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 * 	- xscale1pmu: 2 event counters and a cycle counter
 * 	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */
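
/*
 * Both variants are programmed entirely through coprocessor 14 (see the
 * mrc/mcr p14 accessors below); counter index 0 is always the cycle
 * counter and the remaining indices are the event counters.
 */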

#ifdef CONFIG_CPU_XSCALE

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};

enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};

static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
};

#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
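
/*
 * Summary of the xscale1 layout described by the #defines above: a single
 * PMNC register carries the global enable/reset/CNT64 bits, the per-counter
 * interrupt-enable bits, the overflow flags, and an 8-bit event-select
 * field for each of the two event counters (XSCALE1_COUNT{0,1}_EVT_SHFT).
 */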

static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4 bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void xscale1pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale1pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

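/*
 * The cycle counter counts only the XSCALE_PERFCTR_CCNT pseudo-event, so it
 * is handed out solely for that; every other event takes whichever of the
 * general-purpose counters is still free in cpuc->used_mask.
 */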
static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}

static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}

static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}

static int xscale_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &xscale_perf_map,
				&xscale_perf_cache_map, 0xFF);
}

static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale1";
	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
	cpu_pmu->enable		= xscale1pmu_enable_event;
	cpu_pmu->disable	= xscale1pmu_disable_event;
	cpu_pmu->read_counter	= xscale1pmu_read_counter;
	cpu_pmu->write_counter	= xscale1pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale1pmu_start;
	cpu_pmu->stop		= xscale1pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 3;

	return 0;
}

#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
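
/*
 * Unlike xscale1, the xscale2 PMU splits its state across several cp14
 * registers: PMNC holds only the global enable/reset/CNT64 bits, while the
 * interrupt enables, overflow flags and per-counter event selects each live
 * in their own register (see the accessors below). The bit layouts above
 * mirror that split.
 */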

static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
					enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void xscale2pmu_enable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale2pmu_disable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel, of_flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

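	/*
	 * of_flags selects this counter's overflow bit; writing it below
	 * clears any overflow left pending for the counter being disabled,
	 * so it cannot be mistaken for a fresh event if the counter is
	 * later reused.
	 */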
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}

static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}

static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale2";
	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
	cpu_pmu->enable		= xscale2pmu_enable_event;
	cpu_pmu->disable	= xscale2pmu_disable_event;
	cpu_pmu->read_counter	= xscale2pmu_read_counter;
	cpu_pmu->write_counter	= xscale2pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale2pmu_start;
	cpu_pmu->stop		= xscale2pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 5;

	return 0;
}

static const struct pmu_probe_info xscale_pmu_probe_table[] = {
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel value */ }
};

static int xscale_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
}

static struct platform_driver xscale_pmu_driver = {
	.driver		= {
		.name	= "xscale-pmu",
	},
	.probe		= xscale_pmu_device_probe,
};

builtin_platform_driver(xscale_pmu_driver);
#endif /* CONFIG_CPU_XSCALE */