1 /*
2  * HiSilicon SoC HHA uncore Hardware event counters support
3  *
4  * Copyright (C) 2017 Hisilicon Limited
5  * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
6  *         Anurup M <anurup.m@huawei.com>
7  *
8  * This code is based on the uncore PMUs like arm-cci and arm-ccn.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14 #include <linux/acpi.h>
15 #include <linux/bug.h>
16 #include <linux/cpuhotplug.h>
17 #include <linux/interrupt.h>
18 #include <linux/irq.h>
19 #include <linux/list.h>
20 #include <linux/platform_device.h>
21 #include <linux/smp.h>
22 
23 #include "hisi_uncore_pmu.h"
24 
25 /* HHA register definition */
26 #define HHA_INT_MASK		0x0804
27 #define HHA_INT_STATUS		0x0808
28 #define HHA_INT_CLEAR		0x080C
29 #define HHA_PERF_CTRL		0x1E00
30 #define HHA_EVENT_CTRL		0x1E04
31 #define HHA_EVENT_TYPE0		0x1E80
32 /*
33  * Each counter is 48-bits and [48:63] are reserved
34  * which are Read-As-Zero and Writes-Ignored.
35  */
36 #define HHA_CNT0_LOWER		0x1F00
37 
38 /* HHA has 16-counters */
39 #define HHA_NR_COUNTERS		0x10
40 
41 #define HHA_PERF_CTRL_EN	0x1
42 #define HHA_EVTYPE_NONE		0xff
43 
44 /*
45  * Select the counter register offset using the counter index
46  * each counter is 48-bits.
47  */
48 static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
49 {
50 	return (HHA_CNT0_LOWER + (cntr_idx * 8));
51 }
52 
53 static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
54 				     struct hw_perf_event *hwc)
55 {
56 	u32 idx = hwc->idx;
57 
58 	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
59 		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
60 		return 0;
61 	}
62 
63 	/* Read 64 bits and like L3C, top 16 bits are RAZ */
64 	return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
65 }
66 
67 static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
68 				       struct hw_perf_event *hwc, u64 val)
69 {
70 	u32 idx = hwc->idx;
71 
72 	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
73 		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
74 		return;
75 	}
76 
77 	/* Write 64 bits and like L3C, top 16 bits are WI */
78 	writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
79 }
80 
81 static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
82 				      u32 type)
83 {
84 	u32 reg, reg_idx, shift, val;
85 
86 	/*
87 	 * Select the appropriate event select register(HHA_EVENT_TYPEx).
88 	 * There are 4 event select registers for the 16 hardware counters.
89 	 * Event code is 8-bits and for the first 4 hardware counters,
90 	 * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters,
91 	 * HHA_EVENT_TYPE1 is chosen and so on.
92 	 */
93 	reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
94 	reg_idx = idx % 4;
95 	shift = 8 * reg_idx;
96 
97 	/* Write event code to HHA_EVENT_TYPEx register */
98 	val = readl(hha_pmu->base + reg);
99 	val &= ~(HHA_EVTYPE_NONE << shift);
100 	val |= (type << shift);
101 	writel(val, hha_pmu->base + reg);
102 }
103 
104 static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
105 {
106 	u32 val;
107 
108 	/*
109 	 * Set perf_enable bit in HHA_PERF_CTRL to start event
110 	 * counting for all enabled counters.
111 	 */
112 	val = readl(hha_pmu->base + HHA_PERF_CTRL);
113 	val |= HHA_PERF_CTRL_EN;
114 	writel(val, hha_pmu->base + HHA_PERF_CTRL);
115 }
116 
117 static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
118 {
119 	u32 val;
120 
121 	/*
122 	 * Clear perf_enable bit in HHA_PERF_CTRL to stop event
123 	 * counting for all enabled counters.
124 	 */
125 	val = readl(hha_pmu->base + HHA_PERF_CTRL);
126 	val &= ~(HHA_PERF_CTRL_EN);
127 	writel(val, hha_pmu->base + HHA_PERF_CTRL);
128 }
129 
130 static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
131 					struct hw_perf_event *hwc)
132 {
133 	u32 val;
134 
135 	/* Enable counter index in HHA_EVENT_CTRL register */
136 	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
137 	val |= (1 << hwc->idx);
138 	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
139 }
140 
141 static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
142 					 struct hw_perf_event *hwc)
143 {
144 	u32 val;
145 
146 	/* Clear counter index in HHA_EVENT_CTRL register */
147 	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
148 	val &= ~(1 << hwc->idx);
149 	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
150 }
151 
152 static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
153 					    struct hw_perf_event *hwc)
154 {
155 	u32 val;
156 
157 	/* Write 0 to enable interrupt */
158 	val = readl(hha_pmu->base + HHA_INT_MASK);
159 	val &= ~(1 << hwc->idx);
160 	writel(val, hha_pmu->base + HHA_INT_MASK);
161 }
162 
163 static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
164 					     struct hw_perf_event *hwc)
165 {
166 	u32 val;
167 
168 	/* Write 1 to mask interrupt */
169 	val = readl(hha_pmu->base + HHA_INT_MASK);
170 	val |= (1 << hwc->idx);
171 	writel(val, hha_pmu->base + HHA_INT_MASK);
172 }
173 
174 static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
175 {
176 	struct hisi_pmu *hha_pmu = dev_id;
177 	struct perf_event *event;
178 	unsigned long overflown;
179 	int idx;
180 
181 	/* Read HHA_INT_STATUS register */
182 	overflown = readl(hha_pmu->base + HHA_INT_STATUS);
183 	if (!overflown)
184 		return IRQ_NONE;
185 
186 	/*
187 	 * Find the counter index which overflowed if the bit was set
188 	 * and handle it
189 	 */
190 	for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
191 		/* Write 1 to clear the IRQ status flag */
192 		writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
193 
194 		/* Get the corresponding event struct */
195 		event = hha_pmu->pmu_events.hw_events[idx];
196 		if (!event)
197 			continue;
198 
199 		hisi_uncore_pmu_event_update(event);
200 		hisi_uncore_pmu_set_event_period(event);
201 	}
202 
203 	return IRQ_HANDLED;
204 }
205 
206 static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
207 				 struct platform_device *pdev)
208 {
209 	int irq, ret;
210 
211 	/* Read and init IRQ */
212 	irq = platform_get_irq(pdev, 0);
213 	if (irq < 0) {
214 		dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
215 		return irq;
216 	}
217 
218 	ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
219 			      IRQF_NOBALANCING | IRQF_NO_THREAD,
220 			      dev_name(&pdev->dev), hha_pmu);
221 	if (ret < 0) {
222 		dev_err(&pdev->dev,
223 			"Fail to request IRQ:%d ret:%d\n", irq, ret);
224 		return ret;
225 	}
226 
227 	hha_pmu->irq = irq;
228 
229 	return 0;
230 }
231 
232 static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
233 	{ "HISI0243", },
234 	{},
235 };
236 MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
237 
238 static int hisi_hha_pmu_init_data(struct platform_device *pdev,
239 				  struct hisi_pmu *hha_pmu)
240 {
241 	unsigned long long id;
242 	struct resource *res;
243 	acpi_status status;
244 
245 	status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
246 				       "_UID", NULL, &id);
247 	if (ACPI_FAILURE(status))
248 		return -EINVAL;
249 
250 	hha_pmu->index_id = id;
251 
252 	/*
253 	 * Use SCCL_ID and UID to identify the HHA PMU, while
254 	 * SCCL_ID is in MPIDR[aff2].
255 	 */
256 	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
257 				     &hha_pmu->sccl_id)) {
258 		dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
259 		return -EINVAL;
260 	}
261 	/* HHA PMUs only share the same SCCL */
262 	hha_pmu->ccl_id = -1;
263 
264 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
265 	hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
266 	if (IS_ERR(hha_pmu->base)) {
267 		dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
268 		return PTR_ERR(hha_pmu->base);
269 	}
270 
271 	return 0;
272 }
273 
274 static struct attribute *hisi_hha_pmu_format_attr[] = {
275 	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
276 	NULL,
277 };
278 
279 static const struct attribute_group hisi_hha_pmu_format_group = {
280 	.name = "format",
281 	.attrs = hisi_hha_pmu_format_attr,
282 };
283 
284 static struct attribute *hisi_hha_pmu_events_attr[] = {
285 	HISI_PMU_EVENT_ATTR(rx_ops_num,		0x00),
286 	HISI_PMU_EVENT_ATTR(rx_outer,		0x01),
287 	HISI_PMU_EVENT_ATTR(rx_sccl,		0x02),
288 	HISI_PMU_EVENT_ATTR(rx_ccix,		0x03),
289 	HISI_PMU_EVENT_ATTR(rx_wbi,		0x04),
290 	HISI_PMU_EVENT_ATTR(rx_wbip,		0x05),
291 	HISI_PMU_EVENT_ATTR(rx_wtistash,	0x11),
292 	HISI_PMU_EVENT_ATTR(rd_ddr_64b,		0x1c),
293 	HISI_PMU_EVENT_ATTR(wr_dr_64b,		0x1d),
294 	HISI_PMU_EVENT_ATTR(rd_ddr_128b,	0x1e),
295 	HISI_PMU_EVENT_ATTR(wr_ddr_128b,	0x1f),
296 	HISI_PMU_EVENT_ATTR(spill_num,		0x20),
297 	HISI_PMU_EVENT_ATTR(spill_success,	0x21),
298 	HISI_PMU_EVENT_ATTR(bi_num,		0x23),
299 	HISI_PMU_EVENT_ATTR(mediated_num,	0x32),
300 	HISI_PMU_EVENT_ATTR(tx_snp_num,		0x33),
301 	HISI_PMU_EVENT_ATTR(tx_snp_outer,	0x34),
302 	HISI_PMU_EVENT_ATTR(tx_snp_ccix,	0x35),
303 	HISI_PMU_EVENT_ATTR(rx_snprspdata,	0x38),
304 	HISI_PMU_EVENT_ATTR(rx_snprsp_outer,	0x3c),
305 	HISI_PMU_EVENT_ATTR(sdir-lookup,	0x40),
306 	HISI_PMU_EVENT_ATTR(edir-lookup,	0x41),
307 	HISI_PMU_EVENT_ATTR(sdir-hit,		0x42),
308 	HISI_PMU_EVENT_ATTR(edir-hit,		0x43),
309 	HISI_PMU_EVENT_ATTR(sdir-home-migrate,	0x4c),
310 	HISI_PMU_EVENT_ATTR(edir-home-migrate,  0x4d),
311 	NULL,
312 };
313 
314 static const struct attribute_group hisi_hha_pmu_events_group = {
315 	.name = "events",
316 	.attrs = hisi_hha_pmu_events_attr,
317 };
318 
319 static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
320 
321 static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
322 	&dev_attr_cpumask.attr,
323 	NULL,
324 };
325 
326 static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
327 	.attrs = hisi_hha_pmu_cpumask_attrs,
328 };
329 
330 static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
331 	&hisi_hha_pmu_format_group,
332 	&hisi_hha_pmu_events_group,
333 	&hisi_hha_pmu_cpumask_attr_group,
334 	NULL,
335 };
336 
337 static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
338 	.write_evtype		= hisi_hha_pmu_write_evtype,
339 	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
340 	.start_counters		= hisi_hha_pmu_start_counters,
341 	.stop_counters		= hisi_hha_pmu_stop_counters,
342 	.enable_counter		= hisi_hha_pmu_enable_counter,
343 	.disable_counter	= hisi_hha_pmu_disable_counter,
344 	.enable_counter_int	= hisi_hha_pmu_enable_counter_int,
345 	.disable_counter_int	= hisi_hha_pmu_disable_counter_int,
346 	.write_counter		= hisi_hha_pmu_write_counter,
347 	.read_counter		= hisi_hha_pmu_read_counter,
348 };
349 
350 static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
351 				  struct hisi_pmu *hha_pmu)
352 {
353 	int ret;
354 
355 	ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
356 	if (ret)
357 		return ret;
358 
359 	ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
360 	if (ret)
361 		return ret;
362 
363 	hha_pmu->num_counters = HHA_NR_COUNTERS;
364 	hha_pmu->counter_bits = 48;
365 	hha_pmu->ops = &hisi_uncore_hha_ops;
366 	hha_pmu->dev = &pdev->dev;
367 	hha_pmu->on_cpu = -1;
368 	hha_pmu->check_event = 0x65;
369 
370 	return 0;
371 }
372 
373 static int hisi_hha_pmu_probe(struct platform_device *pdev)
374 {
375 	struct hisi_pmu *hha_pmu;
376 	char *name;
377 	int ret;
378 
379 	hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
380 	if (!hha_pmu)
381 		return -ENOMEM;
382 
383 	platform_set_drvdata(pdev, hha_pmu);
384 
385 	ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
386 	if (ret)
387 		return ret;
388 
389 	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
390 				       &hha_pmu->node);
391 	if (ret) {
392 		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
393 		return ret;
394 	}
395 
396 	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
397 			      hha_pmu->sccl_id, hha_pmu->index_id);
398 	hha_pmu->pmu = (struct pmu) {
399 		.name		= name,
400 		.task_ctx_nr	= perf_invalid_context,
401 		.event_init	= hisi_uncore_pmu_event_init,
402 		.pmu_enable	= hisi_uncore_pmu_enable,
403 		.pmu_disable	= hisi_uncore_pmu_disable,
404 		.add		= hisi_uncore_pmu_add,
405 		.del		= hisi_uncore_pmu_del,
406 		.start		= hisi_uncore_pmu_start,
407 		.stop		= hisi_uncore_pmu_stop,
408 		.read		= hisi_uncore_pmu_read,
409 		.attr_groups	= hisi_hha_pmu_attr_groups,
410 	};
411 
412 	ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
413 	if (ret) {
414 		dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
415 		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
416 					    &hha_pmu->node);
417 	}
418 
419 	return ret;
420 }
421 
422 static int hisi_hha_pmu_remove(struct platform_device *pdev)
423 {
424 	struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
425 
426 	perf_pmu_unregister(&hha_pmu->pmu);
427 	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
428 				    &hha_pmu->node);
429 
430 	return 0;
431 }
432 
433 static struct platform_driver hisi_hha_pmu_driver = {
434 	.driver = {
435 		.name = "hisi_hha_pmu",
436 		.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
437 	},
438 	.probe = hisi_hha_pmu_probe,
439 	.remove = hisi_hha_pmu_remove,
440 };
441 
442 static int __init hisi_hha_pmu_module_init(void)
443 {
444 	int ret;
445 
446 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
447 				      "AP_PERF_ARM_HISI_HHA_ONLINE",
448 				      hisi_uncore_pmu_online_cpu,
449 				      hisi_uncore_pmu_offline_cpu);
450 	if (ret) {
451 		pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
452 		return ret;
453 	}
454 
455 	ret = platform_driver_register(&hisi_hha_pmu_driver);
456 	if (ret)
457 		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
458 
459 	return ret;
460 }
461 module_init(hisi_hha_pmu_module_init);
462 
463 static void __exit hisi_hha_pmu_module_exit(void)
464 {
465 	platform_driver_unregister(&hisi_hha_pmu_driver);
466 	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
467 }
468 module_exit(hisi_hha_pmu_module_exit);
469 
470 MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
471 MODULE_LICENSE("GPL v2");
472 MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
473 MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
474