// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC DDRC uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* DDRC register definition */
#define DDRC_PERF_CTRL		0x010
#define DDRC_FLUX_WR		0x380
#define DDRC_FLUX_RD		0x384
#define DDRC_FLUX_WCMD		0x388
#define DDRC_FLUX_RCMD		0x38c
#define DDRC_PRE_CMD		0x3c0
#define DDRC_ACT_CMD		0x3c4
#define DDRC_RNK_CHG		0x3cc
#define DDRC_RW_CHG		0x3d0
#define DDRC_EVENT_CTRL		0x6C0
#define DDRC_INT_MASK		0x6c8
#define DDRC_INT_STATUS		0x6cc
#define DDRC_INT_CLEAR		0x6d0

/* DDRC has 8 counters */
#define DDRC_NR_COUNTERS	0x8
#define DDRC_PERF_CTRL_EN	0x2

/*
 * For the DDRC PMU, there are eight events and each event has been mapped
 * to a fixed-purpose counter whose register offset is not consistent.
 * Therefore there is no need to write an event type, and we assume that
 * the event code (0 to 7) is equal to the counter index in the PMU driver.
 */
#define GET_DDRC_EVENTID(hwc)	((hwc)->config_base & 0x7)
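
/*
 * Illustration: an event config of 0x3 selects the flux_rcmd event and,
 * through the mapping below, statistics counter 3 (DDRC_FLUX_RCMD).
 */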

static const u32 ddrc_reg_off[] = {
	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
};

/*
 * Select the counter register offset using the counter index.
 * In the DDRC there are no programmable counters; the count
 * is read from the statistics counter register itself.
 */
static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
{
	return ddrc_reg_off[cntr_idx];
}

static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
				      struct hw_perf_event *hwc)
{
	/* Use event code as counter index */
	u32 idx = GET_DDRC_EVENTID(hwc);

	if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
		dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
		return 0;
	}

	return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
}

static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
					struct hw_perf_event *hwc, u64 val)
{
	u32 idx = GET_DDRC_EVENTID(hwc);

	if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
		dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
		return;
	}

	writel((u32)val,
	       ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
}

/*
 * For the DDRC PMU, events are mapped to fixed-purpose counters by
 * hardware, so there is no need to write an event type.
 */
static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
				       u32 type)
{
}

static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	/* Set perf_enable in DDRC_PERF_CTRL to start event counting */
	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
	val |= DDRC_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
{
	u32 val;

	/* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
	val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
	val &= ~DDRC_PERF_CTRL_EN;
	writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
}

static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
					 struct hw_perf_event *hwc)
{
	u32 val;

	/* Set counter index(event code) in DDRC_EVENT_CTRL register */
	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
	val |= (1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
					  struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index(event code) in DDRC_EVENT_CTRL register */
	val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
	val &= ~(1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
}

static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
{
	struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
	unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
	struct hw_perf_event *hwc = &event->hw;
	/* For DDRC PMU, we use event code as counter index */
	int idx = GET_DDRC_EVENTID(hwc);

	if (test_bit(idx, used_mask))
		return -EAGAIN;

	set_bit(idx, used_mask);

	return idx;
}

static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable interrupt */
	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
	val &= ~(1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
					      struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask interrupt */
	val = readl(ddrc_pmu->base + DDRC_INT_MASK);
	val |= (1 << GET_DDRC_EVENTID(hwc));
	writel(val, ddrc_pmu->base + DDRC_INT_MASK);
}

static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
{
	struct hisi_pmu *ddrc_pmu = dev_id;
	struct perf_event *event;
	unsigned long overflown;
	int idx;

	/* Read the DDRC_INT_STATUS register */
	overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
	if (!overflown)
		return IRQ_NONE;

	/*
	 * Find the counter index which overflowed if the bit was set
	 * and handle it
	 */
	for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
		/* Write 1 to clear the IRQ status flag */
		writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);

		/* Get the corresponding event struct */
		event = ddrc_pmu->pmu_events.hw_events[idx];
		if (!event)
			continue;

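		/*
		 * Let the common hisi_uncore_pmu helpers fold the hardware
		 * count into the perf event and re-prime the counter for
		 * the next overflow interrupt.
		 */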
		hisi_uncore_pmu_event_update(event);
		hisi_uncore_pmu_set_event_period(event);
	}

	return IRQ_HANDLED;
}

static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
				  struct platform_device *pdev)
{
	int irq, ret;

	/* Read and init IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       dev_name(&pdev->dev), ddrc_pmu);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Fail to request IRQ:%d ret:%d\n", irq, ret);
		return ret;
	}

	ddrc_pmu->irq = irq;

	return 0;
}

static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
	{ "HISI0233", },
	{},
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);

static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
				   struct hisi_pmu *ddrc_pmu)
{
	/*
	 * Use the SCCL_ID and DDRC channel ID to identify the
	 * DDRC PMU, while SCCL_ID is in MPIDR[aff2].
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
				     &ddrc_pmu->index_id)) {
		dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
		return -EINVAL;
	}

	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &ddrc_pmu->sccl_id)) {
		dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
		return -EINVAL;
	}
	/* DDRC PMUs only share the same SCCL */
	ddrc_pmu->ccl_id = -1;

	ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ddrc_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
		return PTR_ERR(ddrc_pmu->base);
	}

	return 0;
}

static struct attribute *hisi_ddrc_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_format_attr,
};

static struct attribute *hisi_ddrc_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(flux_wr,		0x00),
	HISI_PMU_EVENT_ATTR(flux_rd,		0x01),
	HISI_PMU_EVENT_ATTR(flux_wcmd,		0x02),
	HISI_PMU_EVENT_ATTR(flux_rcmd,		0x03),
	HISI_PMU_EVENT_ATTR(pre_cmd,		0x04),
	HISI_PMU_EVENT_ATTR(act_cmd,		0x05),
	HISI_PMU_EVENT_ATTR(rnk_chg,		0x06),
	HISI_PMU_EVENT_ATTR(rw_chg,		0x07),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_events_attr,
};
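
/*
 * Illustrative usage only (indices depend on the platform topology):
 * with a PMU registered as hisi_sccl1_ddrc0, the write-flux event above
 * can be counted system-wide with:
 *	perf stat -e hisi_sccl1_ddrc0/flux_wr/ -a sleep 1
 */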

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
	.attrs = hisi_ddrc_pmu_cpumask_attrs,
};
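
/*
 * The cpumask attribute exposes the single CPU (ddrc_pmu->on_cpu) used to
 * service this uncore PMU; perf is expected to open events on that CPU,
 * and the common hotplug callbacks migrate it if that CPU goes offline.
 */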

static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
	&hisi_ddrc_pmu_format_group,
	&hisi_ddrc_pmu_events_group,
	&hisi_ddrc_pmu_cpumask_attr_group,
	NULL,
};

static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
	.write_evtype		= hisi_ddrc_pmu_write_evtype,
	.get_event_idx		= hisi_ddrc_pmu_get_event_idx,
	.start_counters		= hisi_ddrc_pmu_start_counters,
	.stop_counters		= hisi_ddrc_pmu_stop_counters,
	.enable_counter		= hisi_ddrc_pmu_enable_counter,
	.disable_counter	= hisi_ddrc_pmu_disable_counter,
	.enable_counter_int	= hisi_ddrc_pmu_enable_counter_int,
	.disable_counter_int	= hisi_ddrc_pmu_disable_counter_int,
	.write_counter		= hisi_ddrc_pmu_write_counter,
	.read_counter		= hisi_ddrc_pmu_read_counter,
};

static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
				   struct hisi_pmu *ddrc_pmu)
{
	int ret;

	ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
	if (ret)
		return ret;

	ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
	if (ret)
		return ret;

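	/*
	 * DDRC statistics counters are 32 bits wide and only event codes
	 * 0 to 7 are valid; check_event bounds the event code accepted by
	 * the common event_init path.
	 */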
	ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
	ddrc_pmu->counter_bits = 32;
	ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
	ddrc_pmu->dev = &pdev->dev;
	ddrc_pmu->on_cpu = -1;
	ddrc_pmu->check_event = 7;

	return 0;
}

static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu;
	char *name;
	int ret;

	ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
	if (!ddrc_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, ddrc_pmu);

	ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				       &ddrc_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
			      ddrc_pmu->sccl_id, ddrc_pmu->index_id);
	ddrc_pmu->pmu = (struct pmu) {
		.name		= name,
		.module		= THIS_MODULE,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= hisi_uncore_pmu_event_init,
		.pmu_enable	= hisi_uncore_pmu_enable,
		.pmu_disable	= hisi_uncore_pmu_disable,
		.add		= hisi_uncore_pmu_add,
		.del		= hisi_uncore_pmu_del,
		.start		= hisi_uncore_pmu_start,
		.stop		= hisi_uncore_pmu_stop,
		.read		= hisi_uncore_pmu_read,
		.attr_groups	= hisi_ddrc_pmu_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
	if (ret) {
		dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
		cpuhp_state_remove_instance_nocalls(
			CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
		irq_set_affinity_hint(ddrc_pmu->irq, NULL);
	}

	return ret;
}

static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&ddrc_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
					    &ddrc_pmu->node);
	irq_set_affinity_hint(ddrc_pmu->irq, NULL);

	return 0;
}

static struct platform_driver hisi_ddrc_pmu_driver = {
	.driver = {
		.name = "hisi_ddrc_pmu",
		.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = hisi_ddrc_pmu_probe,
	.remove = hisi_ddrc_pmu_remove,
};

static int __init hisi_ddrc_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				      "AP_PERF_ARM_HISI_DDRC_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_ddrc_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);

	return ret;
}
module_init(hisi_ddrc_pmu_module_init);

static void __exit hisi_ddrc_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_ddrc_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
}
module_exit(hisi_ddrc_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");