xref: /openbmc/linux/arch/powerpc/perf/8xx-pmu.c (revision 1251288e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Performance event support - PPC 8xx
4  *
5  * Copyright 2016 Christophe Leroy, CS Systemes d'Information
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/sched.h>
10 #include <linux/perf_event.h>
11 #include <linux/percpu.h>
12 #include <linux/hardirq.h>
13 #include <asm/pmc.h>
14 #include <asm/machdep.h>
15 #include <asm/firmware.h>
16 #include <asm/ptrace.h>
17 #include <asm/code-patching.h>
18 #include <asm/inst.h>
19 
20 #define PERF_8xx_ID_CPU_CYCLES		1
21 #define PERF_8xx_ID_HW_INSTRUCTIONS	2
22 #define PERF_8xx_ID_ITLB_LOAD_MISS	3
23 #define PERF_8xx_ID_DTLB_LOAD_MISS	4
24 
25 #define C(x)	PERF_COUNT_HW_CACHE_##x
26 #define DTLB_LOAD_MISS	(C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
27 #define ITLB_LOAD_MISS	(C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
28 
/*
 * Software counters presumably updated by the 8xx TLB miss handlers once
 * the patch sites below redirect them through their counting stubs
 * (patch__itlbmiss_perf / patch__dtlbmiss_perf) — TODO confirm against
 * the head_8xx.S assembly.
 */
extern unsigned long itlb_miss_counter, dtlb_miss_counter;
/* High part of the instruction count; low 16 bits live in SPRN_COUNTA. */
extern atomic_t instruction_counter;

/* Reference counts of active events per type: counting hardware/patched
 * handlers are enabled on 0 -> 1 and disabled on 1 -> 0 transitions. */
static atomic_t insn_ctr_ref;
static atomic_t itlb_miss_ref;
static atomic_t dtlb_miss_ref;
35 
36 static s64 get_insn_ctr(void)
37 {
38 	int ctr;
39 	unsigned long counta;
40 
41 	do {
42 		ctr = atomic_read(&instruction_counter);
43 		counta = mfspr(SPRN_COUNTA);
44 	} while (ctr != atomic_read(&instruction_counter));
45 
46 	return ((s64)ctr << 16) | (counta >> 16);
47 }
48 
49 static int event_type(struct perf_event *event)
50 {
51 	switch (event->attr.type) {
52 	case PERF_TYPE_HARDWARE:
53 		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
54 			return PERF_8xx_ID_CPU_CYCLES;
55 		if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
56 			return PERF_8xx_ID_HW_INSTRUCTIONS;
57 		break;
58 	case PERF_TYPE_HW_CACHE:
59 		if (event->attr.config == ITLB_LOAD_MISS)
60 			return PERF_8xx_ID_ITLB_LOAD_MISS;
61 		if (event->attr.config == DTLB_LOAD_MISS)
62 			return PERF_8xx_ID_DTLB_LOAD_MISS;
63 		break;
64 	case PERF_TYPE_RAW:
65 		break;
66 	default:
67 		return -ENOENT;
68 	}
69 	return -EOPNOTSUPP;
70 }
71 
/*
 * perf core 'event_init' callback: accept the event iff it maps to one
 * of our internal types; otherwise propagate the lookup error.
 */
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
	int type = event_type(event);

	return type < 0 ? type : 0;
}
80 
81 static int mpc8xx_pmu_add(struct perf_event *event, int flags)
82 {
83 	int type = event_type(event);
84 	s64 val = 0;
85 
86 	if (type < 0)
87 		return type;
88 
89 	switch (type) {
90 	case PERF_8xx_ID_CPU_CYCLES:
91 		val = get_tb();
92 		break;
93 	case PERF_8xx_ID_HW_INSTRUCTIONS:
94 		if (atomic_inc_return(&insn_ctr_ref) == 1)
95 			mtspr(SPRN_ICTRL, 0xc0080007);
96 		val = get_insn_ctr();
97 		break;
98 	case PERF_8xx_ID_ITLB_LOAD_MISS:
99 		if (atomic_inc_return(&itlb_miss_ref) == 1) {
100 			unsigned long target = patch_site_addr(&patch__itlbmiss_perf);
101 
102 			patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
103 		}
104 		val = itlb_miss_counter;
105 		break;
106 	case PERF_8xx_ID_DTLB_LOAD_MISS:
107 		if (atomic_inc_return(&dtlb_miss_ref) == 1) {
108 			unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);
109 
110 			patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
111 		}
112 		val = dtlb_miss_counter;
113 		break;
114 	}
115 	local64_set(&event->hw.prev_count, val);
116 	return 0;
117 }
118 
/*
 * perf core 'read' callback: fold the progress since the last sample
 * into event->count.
 *
 * The cmpxchg retry loop publishes the new sample into hw.prev_count
 * atomically, so a concurrent reader cannot account the same interval
 * twice: whoever wins the cmpxchg owns the delta it computed.
 */
static void mpc8xx_pmu_read(struct perf_event *event)
{
	int type = event_type(event);
	s64 prev, val = 0, delta = 0;

	if (type < 0)
		return;

	do {
		prev = local64_read(&event->hw.prev_count);
		switch (type) {
		case PERF_8xx_ID_CPU_CYCLES:
			val = get_tb();
			/* NOTE(review): timebase presumably ticks once per 16
			 * CPU cycles on the 8xx, hence the x16 scaling —
			 * confirm against the core's reference manual. */
			delta = 16 * (val - prev);
			break;
		case PERF_8xx_ID_HW_INSTRUCTIONS:
			val = get_insn_ctr();
			/* prev - val: the combined counter apparently counts
			 * down as instructions retire — TODO confirm. The
			 * adjustment handles wraparound of the 48-bit value. */
			delta = prev - val;
			if (delta < 0)
				delta += 0x1000000000000LL;
			break;
		case PERF_8xx_ID_ITLB_LOAD_MISS:
			val = itlb_miss_counter;
			/* 32-bit modular subtraction tolerates counter wrap. */
			delta = (s64)((s32)val - (s32)prev);
			break;
		case PERF_8xx_ID_DTLB_LOAD_MISS:
			val = dtlb_miss_counter;
			/* 32-bit modular subtraction tolerates counter wrap. */
			delta = (s64)((s32)val - (s32)prev);
			break;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
}
153 
154 static void mpc8xx_pmu_del(struct perf_event *event, int flags)
155 {
156 	mpc8xx_pmu_read(event);
157 
158 	/* If it was the last user, stop counting to avoid useles overhead */
159 	switch (event_type(event)) {
160 	case PERF_8xx_ID_CPU_CYCLES:
161 		break;
162 	case PERF_8xx_ID_HW_INSTRUCTIONS:
163 		if (atomic_dec_return(&insn_ctr_ref) == 0)
164 			mtspr(SPRN_ICTRL, 7);
165 		break;
166 	case PERF_8xx_ID_ITLB_LOAD_MISS:
167 		if (atomic_dec_return(&itlb_miss_ref) == 0) {
168 			/* mfspr r10, SPRN_SPRG_SCRATCH0 */
169 			struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) |
170 					    __PPC_SPR(SPRN_SPRG_SCRATCH0));
171 
172 			patch_instruction_site(&patch__itlbmiss_exit_1, insn);
173 		}
174 		break;
175 	case PERF_8xx_ID_DTLB_LOAD_MISS:
176 		if (atomic_dec_return(&dtlb_miss_ref) == 0) {
177 			/* mfspr r10, SPRN_DAR */
178 			struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) |
179 					    __PPC_SPR(SPRN_DAR));
180 
181 			patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
182 		}
183 		break;
184 	}
185 }
186 
/*
 * The 8xx PMU: purely sampling-by-read counters with no overflow
 * interrupt, hence NO_INTERRUPT/NO_NMI capabilities.
 */
static struct pmu mpc8xx_pmu = {
	.event_init	= mpc8xx_pmu_event_init,
	.add		= mpc8xx_pmu_add,
	.del		= mpc8xx_pmu_del,
	.read		= mpc8xx_pmu_read,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT |
			  PERF_PMU_CAP_NO_NMI,
};
195 
/*
 * Put the development-support counter registers in a known state and
 * register the PMU as this CPU's "cpu" perf provider.
 *
 * NOTE(review): ICTRL=7 appears to leave instruction counting disabled
 * (the same value is written in mpc8xx_pmu_del when the last user goes
 * away) and COUNTA is preset to 0xffff — confirm the exact field
 * meanings against the MPC8xx reference manual.
 */
static int init_mpc8xx_pmu(void)
{
	mtspr(SPRN_ICTRL, 7);
	mtspr(SPRN_CMPA, 0);
	mtspr(SPRN_COUNTA, 0xffff);

	return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}

early_initcall(init_mpc8xx_pmu);
206