/*
 * Performance event support - Freescale Embedded Performance Monitor
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

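/*
 * Per-CPU bookkeeping: which events currently occupy this CPU's
 * counters, whether the PMU is frozen, and whether the PMCs have
 * ever been enabled on this CPU.
 */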
struct cpu_hw_events {
	int n_events;
	int disabled;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct fsl_emb_pmu *ppmu;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
	return !regs->softe;
#else
	return 0;
#endif
}

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 0:
		val = mfpmr(PMRN_PMC0);
		break;
	case 1:
		val = mfpmr(PMRN_PMC1);
		break;
	case 2:
		val = mfpmr(PMRN_PMC2);
		break;
	case 3:
		val = mfpmr(PMRN_PMC3);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}

	isync();
}

/*
 * Write one local control A register
 */
static void write_pmlca(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCA0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
	}

	isync();
}

/*
 * Write one local control B register
 */
static void write_pmlcb(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCB0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCB1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCB2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCB3, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
	}

	isync();
}

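/*
 * Fold the current hardware count into event->count, accounting for
 * the 32-bit wrap of the counter.  The cmpxchg loop retries if a PMU
 * interrupt updates prev_count between our read and our update.
 */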
static void fsl_emb_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		if (atomic_read(&num_events)) {
			/*
			 * Set the 'freeze all counters' bit, and disable
			 * interrupts.  The barrier is to make sure the
			 * mtpmr has been executed and the PMU has frozen
			 * the events before we return.
			 */

			mtpmr(PMRN_PMGC0, PMGC0_FAC);
			isync();
		}
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all events after a matching fsl_emb_pmu_disable():
 * unfreeze the counters and turn PMU interrupts back on if any
 * events are currently scheduled on this CPU.
 */
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled)
		goto out;

	cpuhw->disabled = 0;
	ppc_set_pmu_inuse(cpuhw->n_events != 0);

	if (cpuhw->n_events > 0) {
		mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
		isync();
	}

 out:
	local_irq_restore(flags);
}

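/*
 * Collect the group leader and its active non-software siblings into
 * ctrs[].  Returns the number of hardware events collected, or -1 if
 * the group needs more than max_count counters and can never fit.
 */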
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[])
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		n++;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			n++;
		}
	}
	return n;
}

/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw;
	int ret = -EAGAIN;
	int num_counters = ppmu->n_counter;
	u64 val;
	int i;

	perf_pmu_disable(event->pmu);
	cpuhw = &get_cpu_var(cpu_hw_events);

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
		num_counters = ppmu->n_restricted;

	/*
	 * Allocate counters from top-down, so that restricted-capable
	 * counters are kept free as long as possible.
	 */
	for (i = num_counters - 1; i >= 0; i--) {
		if (cpuhw->event[i])
			continue;

		break;
	}

	if (i < 0)
		goto out;

	event->hw.idx = i;
	cpuhw->event[i] = event;
	++cpuhw->n_events;

	val = 0;
	if (event->hw.sample_period) {
		s64 left = local64_read(&event->hw.period_left);
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	local64_set(&event->hw.prev_count, val);

	if (!(flags & PERF_EF_START)) {
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		val = 0;
	}

	write_pmc(i, val);
	perf_event_update_userpage(event);

	write_pmlcb(i, event->hw.config >> 32);
	write_pmlca(i, event->hw.config_base);

	ret = 0;
 out:
	put_cpu_var(cpu_hw_events);
	perf_pmu_enable(event->pmu);
	return ret;
}

/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw;
	int i = event->hw.idx;

	perf_pmu_disable(event->pmu);
	if (i < 0)
		goto out;

	fsl_emb_pmu_read(event);

	cpuhw = &get_cpu_var(cpu_hw_events);

	WARN_ON(event != cpuhw->event[event->hw.idx]);

	write_pmlca(i, 0);
	write_pmlcb(i, 0);
	write_pmc(i, 0);

	cpuhw->event[i] = NULL;
	event->hw.idx = -1;

	/*
	 * TODO: if at least one restricted event exists, and we
	 * just freed up a non-restricted-capable counter, and
	 * there is a restricted-capable counter occupied by
	 * a non-restricted event, migrate that event to the
	 * vacated counter.
	 */

	cpuhw->n_events--;

 out:
	perf_pmu_enable(event->pmu);
	put_cpu_var(cpu_hw_events);
}

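/*
 * Restart a stopped event: clear PERF_HES_STOPPED and reprogram its
 * counter from the remaining sample period.
 */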
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	unsigned long val;
	s64 left;

	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);
	/*
	 * The counter interrupts when bit 31 becomes set, so program it
	 * with 0x80000000 - left, as fsl_emb_pmu_add() does, rather than
	 * writing the raw period_left value.
	 */
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

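/*
 * Stop a running event: fold the current hardware count into
 * event->count, mark it stopped, and zero its counter.
 */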
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	fsl_emb_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

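/*
 * Set up a new event: translate its config to a hardware event code,
 * check that its group can still be scheduled, apply the exclude_*
 * attributes, and reserve the PMC hardware if this is the first event.
 */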
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	struct perf_event *events[MAX_HWEVENTS];
	int n;
	int err;
	int num_restricted;
	int i;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;

	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;

	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;

	default:
		return -ENOENT;
	}

	event->hw.config = ppmu->xlate_event(ev);
	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
		return -EINVAL;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
		                   ppmu->n_counter - 1, events);
		if (n < 0)
			return -EINVAL;
	}

	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
		num_restricted = 0;
		for (i = 0; i < n; i++) {
			if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
				num_restricted++;
		}

		if (num_restricted >= ppmu->n_restricted)
			return -EINVAL;
	}

	event->hw.idx = -1;

	event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
	                        (u32)((ev << 16) & PMLCA_EVENT_MASK);

	if (event->attr.exclude_user)
		event->hw.config_base |= PMLCA_FCU;
	if (event->attr.exclude_kernel)
		event->hw.config_base |= PMLCA_FCS;
	if (event->attr.exclude_idle)
		return -ENOTSUPP;

	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);

		mtpmr(PMRN_PMGC0, PMGC0_FAC);
		isync();
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}

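/*
 * The callbacks the perf core uses to drive this PMU; registered with
 * the core via perf_pmu_register() in register_fsl_emb_pmu() below.
 */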
static struct pmu fsl_emb_pmu = {
	.pmu_enable	= fsl_emb_pmu_enable,
	.pmu_disable	= fsl_emb_pmu_disable,
	.event_init	= fsl_emb_pmu_event_init,
	.add		= fsl_emb_pmu_add,
	.del		= fsl_emb_pmu_del,
	.start		= fsl_emb_pmu_start,
	.stop		= fsl_emb_pmu_stop,
	.read		= fsl_emb_pmu_read,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (perf_event_overflow(event, &data, regs))
			fsl_emb_pmu_stop(event, 0);
	}
}

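/*
 * PMU interrupt handler: scan all counters for ones that have
 * overflowed (gone negative) and hand them to record_and_restart(),
 * then unfreeze the counters.
 */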
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val;
	int found = 0;
	int nmi;

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < ppmu->n_counter; ++i) {
		event = cpuhw->event[i];

		val = read_pmc(i);
		if ((int)val < 0) {
			if (event) {
				/* event has overflowed */
				found = 1;
				record_and_restart(event, val, regs);
			} else {
				/*
				 * Disabled counter is negative,
				 * reset it just in case.
				 */
				write_pmc(i, 0);
			}
		}
	}

	/* PMM will keep counters frozen until we return from the interrupt. */
	mtmsr(mfmsr() | MSR_PMM);
	mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
	isync();

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

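/* Initialize a CPU's counter bookkeeping to a clean state. */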
void hw_perf_event_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
}

int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
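
/*
 * Usage sketch (illustrative, not part of this file): a chip-specific
 * driver fills in a struct fsl_emb_pmu and registers it at boot.  Only
 * fields referenced by this file are shown; the names my_*, NUM_EVENTS
 * and the initcall wiring are placeholders -- see e500-pmu.c for a
 * real example.
 *
 *	static u64 my_xlate_event(u64 event_low)
 *	{
 *		if (event_low >= NUM_EVENTS)
 *			return 0;			// not a valid event
 *		return FSL_EMB_EVENT_VALID;		// low 32 bits -> PMLCA, high -> PMLCB
 *	}
 *
 *	static struct fsl_emb_pmu my_pmu = {
 *		.name		= "my-chip",
 *		.n_counter	= 4,
 *		.n_restricted	= 2,
 *		.xlate_event	= my_xlate_event,
 *		.n_generic	= ARRAY_SIZE(my_generic_events),
 *		.generic_events	= my_generic_events,
 *		.cache_events	= &my_cache_events,
 *	};
 *
 *	static int __init init_my_pmu(void)
 *	{
 *		return register_fsl_emb_pmu(&my_pmu);
 *	}
 *	early_initcall(init_my_pmu);
 */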