1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
3  *
4  * Copyright (C) 2021 Marvell.
5  */
6 
7 #include <linux/init.h>
8 #include <linux/io.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/of_device.h>
13 #include <linux/perf_event.h>
14 #include <linux/hrtimer.h>
15 #include <linux/acpi.h>
16 
17 /* Performance Counters Operating Mode Control Registers */
18 #define DDRC_PERF_CNT_OP_MODE_CTRL	0x8020
19 #define OP_MODE_CTRL_VAL_MANNUAL	0x1
20 
21 /* Performance Counters Start Operation Control Registers */
22 #define DDRC_PERF_CNT_START_OP_CTRL	0x8028
23 #define START_OP_CTRL_VAL_START		0x1ULL
24 #define START_OP_CTRL_VAL_ACTIVE	0x2
25 
26 /* Performance Counters End Operation Control Registers */
27 #define DDRC_PERF_CNT_END_OP_CTRL	0x8030
28 #define END_OP_CTRL_VAL_END		0x1ULL
29 
30 /* Performance Counters End Status Registers */
31 #define DDRC_PERF_CNT_END_STATUS		0x8038
32 #define END_STATUS_VAL_END_TIMER_MODE_END	0x1
33 
34 /* Performance Counters Configuration Registers */
35 #define DDRC_PERF_CFG_BASE		0x8040
36 
37 /* 8 Generic event counter + 2 fixed event counters */
38 #define DDRC_PERF_NUM_GEN_COUNTERS	8
39 #define DDRC_PERF_NUM_FIX_COUNTERS	2
40 #define DDRC_PERF_READ_COUNTER_IDX	DDRC_PERF_NUM_GEN_COUNTERS
41 #define DDRC_PERF_WRITE_COUNTER_IDX	(DDRC_PERF_NUM_GEN_COUNTERS + 1)
42 #define DDRC_PERF_NUM_COUNTERS		(DDRC_PERF_NUM_GEN_COUNTERS + \
43 					 DDRC_PERF_NUM_FIX_COUNTERS)
44 
45 /* Generic event counter registers */
46 #define DDRC_PERF_CFG(n)		(DDRC_PERF_CFG_BASE + 8 * (n))
47 #define EVENT_ENABLE			BIT_ULL(63)
48 
49 /* Two dedicated event counters for DDR reads and writes */
50 #define EVENT_DDR_READS			101
51 #define EVENT_DDR_WRITES		100
52 
53 /*
54  * programmable events IDs in programmable event counters.
55  * DO NOT change these event-id numbers, they are used to
56  * program event bitmap in h/w.
57  */
58 #define EVENT_OP_IS_ZQLATCH			55
59 #define EVENT_OP_IS_ZQSTART			54
60 #define EVENT_OP_IS_TCR_MRR			53
61 #define EVENT_OP_IS_DQSOSC_MRR			52
62 #define EVENT_OP_IS_DQSOSC_MPC			51
63 #define EVENT_VISIBLE_WIN_LIMIT_REACHED_WR	50
64 #define EVENT_VISIBLE_WIN_LIMIT_REACHED_RD	49
65 #define EVENT_BSM_STARVATION			48
66 #define EVENT_BSM_ALLOC				47
67 #define EVENT_LPR_REQ_WITH_NOCREDIT		46
68 #define EVENT_HPR_REQ_WITH_NOCREDIT		45
69 #define EVENT_OP_IS_ZQCS			44
70 #define EVENT_OP_IS_ZQCL			43
71 #define EVENT_OP_IS_LOAD_MODE			42
72 #define EVENT_OP_IS_SPEC_REF			41
73 #define EVENT_OP_IS_CRIT_REF			40
74 #define EVENT_OP_IS_REFRESH			39
75 #define EVENT_OP_IS_ENTER_MPSM			35
76 #define EVENT_OP_IS_ENTER_POWERDOWN		31
77 #define EVENT_OP_IS_ENTER_SELFREF		27
78 #define EVENT_WAW_HAZARD			26
79 #define EVENT_RAW_HAZARD			25
80 #define EVENT_WAR_HAZARD			24
81 #define EVENT_WRITE_COMBINE			23
82 #define EVENT_RDWR_TRANSITIONS			22
83 #define EVENT_PRECHARGE_FOR_OTHER		21
84 #define EVENT_PRECHARGE_FOR_RDWR		20
85 #define EVENT_OP_IS_PRECHARGE			19
86 #define EVENT_OP_IS_MWR				18
87 #define EVENT_OP_IS_WR				17
88 #define EVENT_OP_IS_RD				16
89 #define EVENT_OP_IS_RD_ACTIVATE			15
90 #define EVENT_OP_IS_RD_OR_WR			14
91 #define EVENT_OP_IS_ACTIVATE			13
92 #define EVENT_WR_XACT_WHEN_CRITICAL		12
93 #define EVENT_LPR_XACT_WHEN_CRITICAL		11
94 #define EVENT_HPR_XACT_WHEN_CRITICAL		10
95 #define EVENT_DFI_RD_DATA_CYCLES		9
96 #define EVENT_DFI_WR_DATA_CYCLES		8
97 #define EVENT_ACT_BYPASS			7
98 #define EVENT_READ_BYPASS			6
99 #define EVENT_HIF_HI_PRI_RD			5
100 #define EVENT_HIF_RMW				4
101 #define EVENT_HIF_RD				3
102 #define EVENT_HIF_WR				2
103 #define EVENT_HIF_RD_OR_WR			1
104 
105 /* Event counter value registers */
106 #define DDRC_PERF_CNT_VALUE_BASE		0x8080
107 #define DDRC_PERF_CNT_VALUE(n)	(DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
108 
109 /* Fixed event counter enable/disable register */
110 #define DDRC_PERF_CNT_FREERUN_EN	0x80C0
111 #define DDRC_PERF_FREERUN_WRITE_EN	0x1
112 #define DDRC_PERF_FREERUN_READ_EN	0x2
113 
114 /* Fixed event counter control register */
115 #define DDRC_PERF_CNT_FREERUN_CTRL	0x80C8
116 #define DDRC_FREERUN_WRITE_CNT_CLR	0x1
117 #define DDRC_FREERUN_READ_CNT_CLR	0x2
118 
119 /* Fixed event counter value register */
120 #define DDRC_PERF_CNT_VALUE_WR_OP	0x80D0
121 #define DDRC_PERF_CNT_VALUE_RD_OP	0x80D8
122 #define DDRC_PERF_CNT_VALUE_OVERFLOW	BIT_ULL(48)
123 #define DDRC_PERF_CNT_MAX_VALUE		GENMASK_ULL(48, 0)
124 
/* Per-instance state for one DDR controller's performance monitor. */
struct cn10k_ddr_pmu {
	struct pmu pmu;			/* embedded perf core PMU; see to_cn10k_ddr_pmu() */
	void __iomem *base;		/* mapped DDRC perf-counter register window */
	unsigned int cpu;		/* CPU that owns event collection for this PMU */
	struct	device *dev;		/* backing platform device (dev_* logging) */
	int active_events;		/* count of added events; gates the poll hrtimer */
	struct perf_event *events[DDRC_PERF_NUM_COUNTERS];	/* counter idx -> event */
	struct hrtimer hrtimer;		/* periodic poll for 48-bit counter overflow */
	struct hlist_node node;		/* cpuhp multi-instance list linkage */
};
135 
136 #define to_cn10k_ddr_pmu(p)	container_of(p, struct cn10k_ddr_pmu, pmu)
137 
138 static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
139 					struct device_attribute *attr,
140 					char *page)
141 {
142 	struct perf_pmu_events_attr *pmu_attr;
143 
144 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
145 	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
146 
147 }
148 
149 #define CN10K_DDR_PMU_EVENT_ATTR(_name, _id)				     \
150 	PMU_EVENT_ATTR_ID(_name, cn10k_ddr_pmu_event_show, _id)
151 
/*
 * sysfs "events" directory: symbolic event names exposed under
 * /sys/bus/event_source/devices/<pmu>/events/, each carrying the h/w
 * event id later translated by ddr_perf_get_event_bitmap().
 */
static struct attribute *cn10k_ddr_perf_events_attrs[] = {
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_wr_data_access, EVENT_DFI_WR_DATA_CYCLES),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_rd_data_access, EVENT_DFI_RD_DATA_CYCLES),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
					EVENT_HPR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
					EVENT_LPR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
					EVENT_WR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, EVENT_OP_IS_RD_OR_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, EVENT_OP_IS_RD_ACTIVATE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, EVENT_PRECHARGE_FOR_RDWR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
					EVENT_PRECHARGE_FOR_OTHER),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, EVENT_OP_IS_ENTER_POWERDOWN),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
	/* NOTE(review): name says "wr_access" but id is ZQCS — confirm intended */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hpr_req_with_nocredit,
					EVENT_HPR_REQ_WITH_NOCREDIT),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_lpr_req_with_nocredit,
					EVENT_LPR_REQ_WITH_NOCREDIT),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
					EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
					EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
	/* Free run event counters */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
	NULL
};
212 
/* sysfs "events" group wrapping the table above */
static struct attribute_group cn10k_ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = cn10k_ddr_perf_events_attrs,
};

/* "config:0-8": 9-bit event field, enough for ids up to EVENT_DDR_READS */
PMU_FORMAT_ATTR(event, "config:0-8");

static struct attribute *cn10k_ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

/* sysfs "format" group describing how perf encodes attr.config */
static struct attribute_group cn10k_ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = cn10k_ddr_perf_format_attrs,
};
229 
230 static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev,
231 					   struct device_attribute *attr,
232 					   char *buf)
233 {
234 	struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);
235 
236 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
237 }
238 
/* Read-only "cpumask" attribute backed by the show routine above */
static struct device_attribute cn10k_ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, cn10k_ddr_perf_cpumask_show, NULL);

static struct attribute *cn10k_ddr_perf_cpumask_attrs[] = {
	&cn10k_ddr_perf_cpumask_attr.attr,
	NULL,
};

/* Unnamed group: cpumask appears directly in the PMU's sysfs dir */
static struct attribute_group cn10k_ddr_perf_cpumask_attr_group = {
	.attrs = cn10k_ddr_perf_cpumask_attrs,
};

/* Full attribute-group set handed to perf core via pmu.attr_groups */
static const struct attribute_group *cn10k_attr_groups[] = {
	&cn10k_ddr_perf_events_attr_group,
	&cn10k_ddr_perf_format_attr_group,
	&cn10k_ddr_perf_cpumask_attr_group,
	NULL,
};
257 
258 /* Default poll timeout is 100 sec, which is very sufficient for
259  * 48 bit counter incremented max at 5.6 GT/s, which may take many
260  * hours to overflow.
261  */
262 static unsigned long cn10k_ddr_pmu_poll_period_sec = 100;
263 module_param_named(poll_period_sec, cn10k_ddr_pmu_poll_period_sec, ulong, 0644);
264 
265 static ktime_t cn10k_ddr_pmu_timer_period(void)
266 {
267 	return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC);
268 }
269 
270 static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
271 {
272 	switch (eventid) {
273 	case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
274 	case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
275 		*event_bitmap = (1ULL << (eventid - 1));
276 		break;
277 	case EVENT_OP_IS_ENTER_SELFREF:
278 	case EVENT_OP_IS_ENTER_POWERDOWN:
279 	case EVENT_OP_IS_ENTER_MPSM:
280 		*event_bitmap = (0xFULL << (eventid - 1));
281 		break;
282 	default:
283 		pr_err("%s Invalid eventid %d\n", __func__, eventid);
284 		return -EINVAL;
285 	}
286 
287 	return 0;
288 }
289 
290 static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
291 					struct perf_event *event)
292 {
293 	u8 config = event->attr.config;
294 	int i;
295 
296 	/* DDR read free-run counter index */
297 	if (config == EVENT_DDR_READS) {
298 		pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
299 		return DDRC_PERF_READ_COUNTER_IDX;
300 	}
301 
302 	/* DDR write free-run counter index */
303 	if (config == EVENT_DDR_WRITES) {
304 		pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
305 		return DDRC_PERF_WRITE_COUNTER_IDX;
306 	}
307 
308 	/* Allocate DDR generic counters */
309 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
310 		if (pmu->events[i] == NULL) {
311 			pmu->events[i] = event;
312 			return i;
313 		}
314 	}
315 
316 	return -ENOENT;
317 }
318 
/* Release the slot claimed by cn10k_ddr_perf_alloc_counter(). */
static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}
323 
324 static int cn10k_ddr_perf_event_init(struct perf_event *event)
325 {
326 	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
327 	struct hw_perf_event *hwc = &event->hw;
328 
329 	if (event->attr.type != event->pmu->type)
330 		return -ENOENT;
331 
332 	if (is_sampling_event(event)) {
333 		dev_info(pmu->dev, "Sampling not supported!\n");
334 		return -EOPNOTSUPP;
335 	}
336 
337 	if (event->cpu < 0) {
338 		dev_warn(pmu->dev, "Can't provide per-task data!\n");
339 		return -EOPNOTSUPP;
340 	}
341 
342 	/*  We must NOT create groups containing mixed PMUs */
343 	if (event->group_leader->pmu != event->pmu &&
344 	    !is_software_event(event->group_leader))
345 		return -EINVAL;
346 
347 	/* Set ownership of event to one CPU, same event can not be observed
348 	 * on multiple cpus at same time.
349 	 */
350 	event->cpu = pmu->cpu;
351 	hwc->idx = -1;
352 	return 0;
353 }
354 
355 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
356 					  int counter, bool enable)
357 {
358 	u32 reg;
359 	u64 val;
360 
361 	if (counter > DDRC_PERF_NUM_COUNTERS) {
362 		pr_err("Error: unsupported counter %d\n", counter);
363 		return;
364 	}
365 
366 	if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
367 		reg = DDRC_PERF_CFG(counter);
368 		val = readq_relaxed(pmu->base + reg);
369 
370 		if (enable)
371 			val |= EVENT_ENABLE;
372 		else
373 			val &= ~EVENT_ENABLE;
374 
375 		writeq_relaxed(val, pmu->base + reg);
376 	} else {
377 		val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
378 		if (enable) {
379 			if (counter == DDRC_PERF_READ_COUNTER_IDX)
380 				val |= DDRC_PERF_FREERUN_READ_EN;
381 			else
382 				val |= DDRC_PERF_FREERUN_WRITE_EN;
383 		} else {
384 			if (counter == DDRC_PERF_READ_COUNTER_IDX)
385 				val &= ~DDRC_PERF_FREERUN_READ_EN;
386 			else
387 				val &= ~DDRC_PERF_FREERUN_WRITE_EN;
388 		}
389 		writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
390 	}
391 }
392 
393 static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
394 {
395 	u64 val;
396 
397 	if (counter == DDRC_PERF_READ_COUNTER_IDX)
398 		return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
399 
400 	if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
401 		return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
402 
403 	val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
404 	return val;
405 }
406 
/* Fold the counter's delta since the last snapshot into event->count. */
static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_count, new_count, mask;

	/*
	 * Retry until prev_count is atomically swapped for new_count, so a
	 * concurrent updater cannot account the same delta twice.
	 */
	do {
		prev_count = local64_read(&hwc->prev_count);
		new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
	} while (local64_xchg(&hwc->prev_count, new_count) != prev_count);

	/* Mask to the counter's value width so wrapped deltas stay positive */
	mask = DDRC_PERF_CNT_MAX_VALUE;

	local64_add((new_count - prev_count) & mask, &event->count);
}
422 
/* perf ->start() callback: begin counting on the already-allocated
 * counter.
 */
static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	/* Snapshot must be zeroed before enabling so the first update
	 * measures from a clean baseline.
	 */
	local64_set(&hwc->prev_count, 0);

	cn10k_ddr_perf_counter_enable(pmu, counter, true);

	/* Clear PERF_HES_STOPPED: the event is now live */
	hwc->state = 0;
}
435 
436 static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
437 {
438 	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
439 	struct hw_perf_event *hwc = &event->hw;
440 	u8 config = event->attr.config;
441 	int counter, ret;
442 	u32 reg_offset;
443 	u64 val;
444 
445 	counter = cn10k_ddr_perf_alloc_counter(pmu, event);
446 	if (counter < 0)
447 		return -EAGAIN;
448 
449 	pmu->active_events++;
450 	hwc->idx = counter;
451 
452 	if (pmu->active_events == 1)
453 		hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
454 			      HRTIMER_MODE_REL_PINNED);
455 
456 	if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
457 		/* Generic counters, configure event id */
458 		reg_offset = DDRC_PERF_CFG(counter);
459 		ret = ddr_perf_get_event_bitmap(config, &val);
460 		if (ret)
461 			return ret;
462 
463 		writeq_relaxed(val, pmu->base + reg_offset);
464 	} else {
465 		/* fixed event counter, clear counter value */
466 		if (counter == DDRC_PERF_READ_COUNTER_IDX)
467 			val = DDRC_FREERUN_READ_CNT_CLR;
468 		else
469 			val = DDRC_FREERUN_WRITE_CNT_CLR;
470 
471 		writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
472 	}
473 
474 	hwc->state |= PERF_HES_STOPPED;
475 
476 	if (flags & PERF_EF_START)
477 		cn10k_ddr_perf_event_start(event, flags);
478 
479 	return 0;
480 }
481 
482 static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
483 {
484 	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
485 	struct hw_perf_event *hwc = &event->hw;
486 	int counter = hwc->idx;
487 
488 	cn10k_ddr_perf_counter_enable(pmu, counter, false);
489 
490 	if (flags & PERF_EF_UPDATE)
491 		cn10k_ddr_perf_event_update(event);
492 
493 	hwc->state |= PERF_HES_STOPPED;
494 }
495 
496 static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
497 {
498 	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
499 	struct hw_perf_event *hwc = &event->hw;
500 	int counter = hwc->idx;
501 
502 	cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);
503 
504 	cn10k_ddr_perf_free_counter(pmu, counter);
505 	pmu->active_events--;
506 	hwc->idx = -1;
507 
508 	/* Cancel timer when no events to capture */
509 	if (pmu->active_events == 0)
510 		hrtimer_cancel(&pmu->hrtimer);
511 }
512 
/* perf ->pmu_enable(): issue the global start operation, which begins
 * counting on the generic counters.
 */
static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);

	writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
		       DDRC_PERF_CNT_START_OP_CTRL);
}
520 
/* perf ->pmu_disable(): issue the global end operation, halting the
 * generic counters.
 */
static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);

	writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
		       DDRC_PERF_CNT_END_OP_CTRL);
}
528 
529 static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
530 {
531 	struct hw_perf_event *hwc;
532 	int i;
533 
534 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
535 		if (pmu->events[i] == NULL)
536 			continue;
537 
538 		cn10k_ddr_perf_event_update(pmu->events[i]);
539 	}
540 
541 	/* Reset previous count as h/w counter are reset */
542 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
543 		if (pmu->events[i] == NULL)
544 			continue;
545 
546 		hwc = &pmu->events[i]->hw;
547 		local64_set(&hwc->prev_count, 0);
548 	}
549 }
550 
551 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
552 {
553 	struct perf_event *event;
554 	struct hw_perf_event *hwc;
555 	u64 prev_count, new_count;
556 	u64 value;
557 	int i;
558 
559 	event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
560 	if (event) {
561 		hwc = &event->hw;
562 		prev_count = local64_read(&hwc->prev_count);
563 		new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
564 
565 		/* Overflow condition is when new count less than
566 		 * previous count
567 		 */
568 		if (new_count < prev_count)
569 			cn10k_ddr_perf_event_update(event);
570 	}
571 
572 	event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
573 	if (event) {
574 		hwc = &event->hw;
575 		prev_count = local64_read(&hwc->prev_count);
576 		new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
577 
578 		/* Overflow condition is when new count less than
579 		 * previous count
580 		 */
581 		if (new_count < prev_count)
582 			cn10k_ddr_perf_event_update(event);
583 	}
584 
585 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
586 		if (pmu->events[i] == NULL)
587 			continue;
588 
589 		value = cn10k_ddr_perf_read_counter(pmu, i);
590 		if (value == DDRC_PERF_CNT_MAX_VALUE) {
591 			pr_info("Counter-(%d) reached max value\n", i);
592 			cn10k_ddr_perf_event_update_all(pmu);
593 			cn10k_ddr_perf_pmu_disable(&pmu->pmu);
594 			cn10k_ddr_perf_pmu_enable(&pmu->pmu);
595 		}
596 	}
597 
598 	return IRQ_HANDLED;
599 }
600 
/*
 * hrtimer callback: run the overflow handler with local interrupts
 * disabled, then re-arm the timer for the next poll period.
 */
static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
						 hrtimer);
	unsigned long flags;

	local_irq_save(flags);
	cn10k_ddr_pmu_overflow_handler(pmu);
	local_irq_restore(flags);

	/* Re-arm relative to now; picks up module-param period changes */
	hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
	return HRTIMER_RESTART;
}
614 
615 static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
616 {
617 	struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
618 						     node);
619 	unsigned int target;
620 
621 	if (cpu != pmu->cpu)
622 		return 0;
623 
624 	target = cpumask_any_but(cpu_online_mask, cpu);
625 	if (target >= nr_cpu_ids)
626 		return 0;
627 
628 	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
629 	pmu->cpu = target;
630 	return 0;
631 }
632 
633 static int cn10k_ddr_perf_probe(struct platform_device *pdev)
634 {
635 	struct cn10k_ddr_pmu *ddr_pmu;
636 	struct resource *res;
637 	void __iomem *base;
638 	char *name;
639 	int ret;
640 
641 	ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL);
642 	if (!ddr_pmu)
643 		return -ENOMEM;
644 
645 	ddr_pmu->dev = &pdev->dev;
646 	platform_set_drvdata(pdev, ddr_pmu);
647 
648 	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
649 	if (IS_ERR(base))
650 		return PTR_ERR(base);
651 
652 	ddr_pmu->base = base;
653 
654 	/* Setup the PMU counter to work in manual mode */
655 	writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
656 		       DDRC_PERF_CNT_OP_MODE_CTRL);
657 
658 	ddr_pmu->pmu = (struct pmu) {
659 		.module	      = THIS_MODULE,
660 		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
661 		.task_ctx_nr = perf_invalid_context,
662 		.attr_groups = cn10k_attr_groups,
663 		.event_init  = cn10k_ddr_perf_event_init,
664 		.add	     = cn10k_ddr_perf_event_add,
665 		.del	     = cn10k_ddr_perf_event_del,
666 		.start	     = cn10k_ddr_perf_event_start,
667 		.stop	     = cn10k_ddr_perf_event_stop,
668 		.read	     = cn10k_ddr_perf_event_update,
669 		.pmu_enable  = cn10k_ddr_perf_pmu_enable,
670 		.pmu_disable = cn10k_ddr_perf_pmu_disable,
671 	};
672 
673 	/* Choose this cpu to collect perf data */
674 	ddr_pmu->cpu = raw_smp_processor_id();
675 
676 	name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu_%llx",
677 			      res->start);
678 	if (!name)
679 		return -ENOMEM;
680 
681 	hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
682 	ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
683 
684 	cpuhp_state_add_instance_nocalls(
685 				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
686 				&ddr_pmu->node);
687 
688 	ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
689 	if (ret)
690 		goto error;
691 
692 	pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
693 	return 0;
694 error:
695 	cpuhp_state_remove_instance_nocalls(
696 				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
697 				&ddr_pmu->node);
698 	return ret;
699 }
700 
/*
 * Platform driver ->remove(): detach from CPU hotplug, then unregister
 * the PMU.  The MMIO mapping and allocations are devm-managed.
 */
static int cn10k_ddr_perf_remove(struct platform_device *pdev)
{
	struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(
				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
				&ddr_pmu->node);

	perf_pmu_unregister(&ddr_pmu->pmu);
	return 0;
}
712 
/* Device-tree binding */
#ifdef CONFIG_OF
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
	{ .compatible = "marvell,cn10k-ddr-pmu", },
	{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#endif

/* ACPI binding */
#ifdef CONFIG_ACPI
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
	{"MRVL000A", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
#endif

static struct platform_driver cn10k_ddr_pmu_driver = {
	.driver	= {
		.name   = "cn10k-ddr-pmu",
		.of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match),
		.acpi_match_table  = ACPI_PTR(cn10k_ddr_pmu_acpi_match),
		/* PMU state is not rebindable; block manual unbind via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe		= cn10k_ddr_perf_probe,
	.remove		= cn10k_ddr_perf_remove,
};
739 
/*
 * Module init: register the shared CPU-hotplug state first (all PMU
 * instances attach to it from probe), then the platform driver; unwind
 * the hotplug state if driver registration fails.
 */
static int __init cn10k_ddr_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(
				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
				"perf/marvell/cn10k/ddr:online", NULL,
				cn10k_ddr_pmu_offline_cpu);
	if (ret)
		return ret;

	ret = platform_driver_register(&cn10k_ddr_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(
				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
	return ret;
}
757 
/* Module exit: unregister the driver before tearing down the hotplug
 * state it depends on.
 */
static void __exit cn10k_ddr_pmu_exit(void)
{
	platform_driver_unregister(&cn10k_ddr_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
}
763 
764 module_init(cn10k_ddr_pmu_init);
765 module_exit(cn10k_ddr_pmu_exit);
766 
767 MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
768 MODULE_LICENSE("GPL v2");
769