xref: /openbmc/linux/drivers/bus/arm-cci.c (revision 801f33be)
1ed69bdd8SLorenzo Pieralisi /*
2ed69bdd8SLorenzo Pieralisi  * CCI cache coherent interconnect driver
3ed69bdd8SLorenzo Pieralisi  *
4ed69bdd8SLorenzo Pieralisi  * Copyright (C) 2013 ARM Ltd.
5ed69bdd8SLorenzo Pieralisi  * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
6ed69bdd8SLorenzo Pieralisi  *
7ed69bdd8SLorenzo Pieralisi  * This program is free software; you can redistribute it and/or modify
8ed69bdd8SLorenzo Pieralisi  * it under the terms of the GNU General Public License version 2 as
9ed69bdd8SLorenzo Pieralisi  * published by the Free Software Foundation.
10ed69bdd8SLorenzo Pieralisi  *
11ed69bdd8SLorenzo Pieralisi  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12ed69bdd8SLorenzo Pieralisi  * kind, whether express or implied; without even the implied warranty
13ed69bdd8SLorenzo Pieralisi  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14ed69bdd8SLorenzo Pieralisi  * GNU General Public License for more details.
15ed69bdd8SLorenzo Pieralisi  */
16ed69bdd8SLorenzo Pieralisi 
17ed69bdd8SLorenzo Pieralisi #include <linux/arm-cci.h>
18ed69bdd8SLorenzo Pieralisi #include <linux/io.h>
19c6f85cb4SMark Rutland #include <linux/interrupt.h>
20ed69bdd8SLorenzo Pieralisi #include <linux/module.h>
21ed69bdd8SLorenzo Pieralisi #include <linux/of_address.h>
22b91c8f28SPunit Agrawal #include <linux/of_irq.h>
23b91c8f28SPunit Agrawal #include <linux/of_platform.h>
24c6f85cb4SMark Rutland #include <linux/perf_event.h>
25b91c8f28SPunit Agrawal #include <linux/platform_device.h>
26ed69bdd8SLorenzo Pieralisi #include <linux/slab.h>
27b91c8f28SPunit Agrawal #include <linux/spinlock.h>
28ed69bdd8SLorenzo Pieralisi 
29ed69bdd8SLorenzo Pieralisi #include <asm/cacheflush.h>
30ed69bdd8SLorenzo Pieralisi #include <asm/smp_plat.h>
31ed69bdd8SLorenzo Pieralisi 
32f6b9e83cSSuzuki K. Poulose static void __iomem *cci_ctrl_base;
33f6b9e83cSSuzuki K. Poulose static unsigned long cci_ctrl_phys;
34ed69bdd8SLorenzo Pieralisi 
35ee8e5d5fSSuzuki K. Poulose #ifdef CONFIG_ARM_CCI400_PORT_CTRL
36ed69bdd8SLorenzo Pieralisi struct cci_nb_ports {
37ed69bdd8SLorenzo Pieralisi 	unsigned int nb_ace;
38ed69bdd8SLorenzo Pieralisi 	unsigned int nb_ace_lite;
39ed69bdd8SLorenzo Pieralisi };
40ed69bdd8SLorenzo Pieralisi 
41f6b9e83cSSuzuki K. Poulose static const struct cci_nb_ports cci400_ports = {
42f6b9e83cSSuzuki K. Poulose 	.nb_ace = 2,
43f6b9e83cSSuzuki K. Poulose 	.nb_ace_lite = 3
44ed69bdd8SLorenzo Pieralisi };
45ed69bdd8SLorenzo Pieralisi 
46ee8e5d5fSSuzuki K. Poulose #define CCI400_PORTS_DATA	(&cci400_ports)
47ee8e5d5fSSuzuki K. Poulose #else
48ee8e5d5fSSuzuki K. Poulose #define CCI400_PORTS_DATA	(NULL)
49ee8e5d5fSSuzuki K. Poulose #endif
50ee8e5d5fSSuzuki K. Poulose 
51f6b9e83cSSuzuki K. Poulose static const struct of_device_id arm_cci_matches[] = {
52ee8e5d5fSSuzuki K. Poulose #ifdef CONFIG_ARM_CCI400_COMMON
53ee8e5d5fSSuzuki K. Poulose 	{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
54ee8e5d5fSSuzuki K. Poulose #endif
553d2e8701SSuzuki K Poulose #ifdef CONFIG_ARM_CCI5xx_PMU
56a95791efSSuzuki K. Poulose 	{ .compatible = "arm,cci-500", },
57d7dd5fd7SSuzuki K Poulose 	{ .compatible = "arm,cci-550", },
58a95791efSSuzuki K. Poulose #endif
59f6b9e83cSSuzuki K. Poulose 	{},
60ed69bdd8SLorenzo Pieralisi };
61ed69bdd8SLorenzo Pieralisi 
62f4d58938SSuzuki K. Poulose #ifdef CONFIG_ARM_CCI_PMU
63b91c8f28SPunit Agrawal 
64f4d58938SSuzuki K. Poulose #define DRIVER_NAME		"ARM-CCI"
65f6b9e83cSSuzuki K. Poulose #define DRIVER_NAME_PMU		DRIVER_NAME " PMU"
66f6b9e83cSSuzuki K. Poulose 
67b91c8f28SPunit Agrawal #define CCI_PMCR		0x0100
68b91c8f28SPunit Agrawal #define CCI_PID2		0x0fe8
69b91c8f28SPunit Agrawal 
70b91c8f28SPunit Agrawal #define CCI_PMCR_CEN		0x00000001
71b91c8f28SPunit Agrawal #define CCI_PMCR_NCNT_MASK	0x0000f800
72b91c8f28SPunit Agrawal #define CCI_PMCR_NCNT_SHIFT	11
73b91c8f28SPunit Agrawal 
74b91c8f28SPunit Agrawal #define CCI_PID2_REV_MASK	0xf0
75b91c8f28SPunit Agrawal #define CCI_PID2_REV_SHIFT	4
76b91c8f28SPunit Agrawal 
77f6b9e83cSSuzuki K. Poulose #define CCI_PMU_EVT_SEL		0x000
78f6b9e83cSSuzuki K. Poulose #define CCI_PMU_CNTR		0x004
79f6b9e83cSSuzuki K. Poulose #define CCI_PMU_CNTR_CTRL	0x008
80f6b9e83cSSuzuki K. Poulose #define CCI_PMU_OVRFLW		0x00c
81f6b9e83cSSuzuki K. Poulose 
82f6b9e83cSSuzuki K. Poulose #define CCI_PMU_OVRFLW_FLAG	1
83f6b9e83cSSuzuki K. Poulose 
84ab5b316dSSuzuki K. Poulose #define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
85ab5b316dSSuzuki K. Poulose #define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
86f6b9e83cSSuzuki K. Poulose #define CCI_PMU_CNTR_MASK		((1ULL << 32) -1)
87ab5b316dSSuzuki K. Poulose #define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)
88f6b9e83cSSuzuki K. Poulose 
89ab5b316dSSuzuki K. Poulose #define CCI_PMU_MAX_HW_CNTRS(model) \
90ab5b316dSSuzuki K. Poulose 	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
91f6b9e83cSSuzuki K. Poulose 
92fc17c839SSuzuki K. Poulose /* Types of interfaces that can generate events */
93fc17c839SSuzuki K. Poulose enum {
94fc17c839SSuzuki K. Poulose 	CCI_IF_SLAVE,
95fc17c839SSuzuki K. Poulose 	CCI_IF_MASTER,
963d2e8701SSuzuki K Poulose #ifdef CONFIG_ARM_CCI5xx_PMU
97a95791efSSuzuki K. Poulose 	CCI_IF_GLOBAL,
98a95791efSSuzuki K. Poulose #endif
99fc17c839SSuzuki K. Poulose 	CCI_IF_MAX,
100fc17c839SSuzuki K. Poulose };
101fc17c839SSuzuki K. Poulose 
102fc17c839SSuzuki K. Poulose struct event_range {
103fc17c839SSuzuki K. Poulose 	u32 min;
104fc17c839SSuzuki K. Poulose 	u32 max;
105fc17c839SSuzuki K. Poulose };
106fc17c839SSuzuki K. Poulose 
107f6b9e83cSSuzuki K. Poulose struct cci_pmu_hw_events {
108ab5b316dSSuzuki K. Poulose 	struct perf_event **events;
109ab5b316dSSuzuki K. Poulose 	unsigned long *used_mask;
110f6b9e83cSSuzuki K. Poulose 	raw_spinlock_t pmu_lock;
111f6b9e83cSSuzuki K. Poulose };
112f6b9e83cSSuzuki K. Poulose 
11331216290SSuzuki K. Poulose struct cci_pmu;
114ab5b316dSSuzuki K. Poulose /*
115ab5b316dSSuzuki K. Poulose  * struct cci_pmu_model:
116ab5b316dSSuzuki K. Poulose  * @fixed_hw_cntrs - Number of fixed event counters
117ab5b316dSSuzuki K. Poulose  * @num_hw_cntrs - Maximum number of programmable event counters
118ab5b316dSSuzuki K. Poulose  * @cntr_size - Size of an event counter mapping
119ab5b316dSSuzuki K. Poulose  */
120fc17c839SSuzuki K. Poulose struct cci_pmu_model {
121fc17c839SSuzuki K. Poulose 	char *name;
122ab5b316dSSuzuki K. Poulose 	u32 fixed_hw_cntrs;
123ab5b316dSSuzuki K. Poulose 	u32 num_hw_cntrs;
124ab5b316dSSuzuki K. Poulose 	u32 cntr_size;
1255e442ebaSMark Rutland 	struct attribute **format_attrs;
1265e442ebaSMark Rutland 	struct attribute **event_attrs;
127fc17c839SSuzuki K. Poulose 	struct event_range event_ranges[CCI_IF_MAX];
12831216290SSuzuki K. Poulose 	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
12931216290SSuzuki K. Poulose 	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
130fff3f1a0SSuzuki K Poulose 	void (*write_counters)(struct cci_pmu *, unsigned long *);
131fc17c839SSuzuki K. Poulose };
132fc17c839SSuzuki K. Poulose 
133fc17c839SSuzuki K. Poulose static struct cci_pmu_model cci_pmu_models[];
134fc17c839SSuzuki K. Poulose 
135f6b9e83cSSuzuki K. Poulose struct cci_pmu {
136f6b9e83cSSuzuki K. Poulose 	void __iomem *base;
137f6b9e83cSSuzuki K. Poulose 	struct pmu pmu;
138f6b9e83cSSuzuki K. Poulose 	int nr_irqs;
139ab5b316dSSuzuki K. Poulose 	int *irqs;
140f6b9e83cSSuzuki K. Poulose 	unsigned long active_irqs;
141fc17c839SSuzuki K. Poulose 	const struct cci_pmu_model *model;
142f6b9e83cSSuzuki K. Poulose 	struct cci_pmu_hw_events hw_events;
143f6b9e83cSSuzuki K. Poulose 	struct platform_device *plat_device;
144ab5b316dSSuzuki K. Poulose 	int num_cntrs;
145f6b9e83cSSuzuki K. Poulose 	atomic_t active_events;
146f6b9e83cSSuzuki K. Poulose 	struct mutex reserve_mutex;
147b230f0dbSSebastian Andrzej Siewior 	struct hlist_node node;
148f6b9e83cSSuzuki K. Poulose 	cpumask_t cpus;
149f6b9e83cSSuzuki K. Poulose };
150f6b9e83cSSuzuki K. Poulose 
151f6b9e83cSSuzuki K. Poulose #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
152f6b9e83cSSuzuki K. Poulose 
153f4d58938SSuzuki K. Poulose enum cci_models {
154f4d58938SSuzuki K. Poulose #ifdef CONFIG_ARM_CCI400_PMU
155f4d58938SSuzuki K. Poulose 	CCI400_R0,
156f4d58938SSuzuki K. Poulose 	CCI400_R1,
157f4d58938SSuzuki K. Poulose #endif
1583d2e8701SSuzuki K Poulose #ifdef CONFIG_ARM_CCI5xx_PMU
159a95791efSSuzuki K. Poulose 	CCI500_R0,
160d7dd5fd7SSuzuki K Poulose 	CCI550_R0,
161a95791efSSuzuki K. Poulose #endif
162f4d58938SSuzuki K. Poulose 	CCI_MODEL_MAX
163f4d58938SSuzuki K. Poulose };
164b91c8f28SPunit Agrawal 
165c66eea5fSSuzuki K Poulose static void pmu_write_counters(struct cci_pmu *cci_pmu,
166c66eea5fSSuzuki K Poulose 				 unsigned long *mask);
167e14cfad3SSuzuki K. Poulose static ssize_t cci_pmu_format_show(struct device *dev,
168e14cfad3SSuzuki K. Poulose 			struct device_attribute *attr, char *buf);
169e14cfad3SSuzuki K. Poulose static ssize_t cci_pmu_event_show(struct device *dev,
170e14cfad3SSuzuki K. Poulose 			struct device_attribute *attr, char *buf);
171e14cfad3SSuzuki K. Poulose 
172e14cfad3SSuzuki K. Poulose #define CCI_EXT_ATTR_ENTRY(_name, _func, _config) 				\
1735e442ebaSMark Rutland 	&((struct dev_ext_attribute[]) {					\
1745e442ebaSMark Rutland 		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }	\
1755e442ebaSMark Rutland 	})[0].attr.attr
176e14cfad3SSuzuki K. Poulose 
177e14cfad3SSuzuki K. Poulose #define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
178e14cfad3SSuzuki K. Poulose 	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
179e14cfad3SSuzuki K. Poulose #define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
180e14cfad3SSuzuki K. Poulose 	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
181e14cfad3SSuzuki K. Poulose 
182f4d58938SSuzuki K. Poulose /* CCI400 PMU Specific definitions */
183f4d58938SSuzuki K. Poulose 
184f4d58938SSuzuki K. Poulose #ifdef CONFIG_ARM_CCI400_PMU
185f4d58938SSuzuki K. Poulose 
186f4d58938SSuzuki K. Poulose /* Port ids */
187f4d58938SSuzuki K. Poulose #define CCI400_PORT_S0		0
188f4d58938SSuzuki K. Poulose #define CCI400_PORT_S1		1
189f4d58938SSuzuki K. Poulose #define CCI400_PORT_S2		2
190f4d58938SSuzuki K. Poulose #define CCI400_PORT_S3		3
191f4d58938SSuzuki K. Poulose #define CCI400_PORT_S4		4
192f4d58938SSuzuki K. Poulose #define CCI400_PORT_M0		5
193f4d58938SSuzuki K. Poulose #define CCI400_PORT_M1		6
194f4d58938SSuzuki K. Poulose #define CCI400_PORT_M2		7
195f4d58938SSuzuki K. Poulose 
196f4d58938SSuzuki K. Poulose #define CCI400_R1_PX		5
197b91c8f28SPunit Agrawal 
198b91c8f28SPunit Agrawal /*
199b91c8f28SPunit Agrawal  * Instead of an event id to monitor CCI cycles, a dedicated counter is
200b91c8f28SPunit Agrawal  * provided. Use 0xff to represent CCI cycles and hope that no future revisions
201b91c8f28SPunit Agrawal  * make use of this event in hardware.
202b91c8f28SPunit Agrawal  */
203b91c8f28SPunit Agrawal enum cci400_perf_events {
204f4d58938SSuzuki K. Poulose 	CCI400_PMU_CYCLES = 0xff
205b91c8f28SPunit Agrawal };
206b91c8f28SPunit Agrawal 
207f4d58938SSuzuki K. Poulose #define CCI400_PMU_CYCLE_CNTR_IDX	0
208f4d58938SSuzuki K. Poulose #define CCI400_PMU_CNTR0_IDX		1
209b91c8f28SPunit Agrawal 
210b91c8f28SPunit Agrawal /*
211b91c8f28SPunit Agrawal  * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
212b91c8f28SPunit Agrawal  * ports and bits 4:0 are event codes. There are different event codes
213b91c8f28SPunit Agrawal  * associated with each port type.
214b91c8f28SPunit Agrawal  *
215b91c8f28SPunit Agrawal  * Additionally, the range of events associated with the port types changed
216b91c8f28SPunit Agrawal  * between Rev0 and Rev1.
217b91c8f28SPunit Agrawal  *
218b91c8f28SPunit Agrawal  * The constants below define the range of valid codes for each port type for
219b91c8f28SPunit Agrawal  * the different revisions and are used to validate the event to be monitored.
220b91c8f28SPunit Agrawal  */
221b91c8f28SPunit Agrawal 
222f4d58938SSuzuki K. Poulose #define CCI400_PMU_EVENT_MASK		0xffUL
223f4d58938SSuzuki K. Poulose #define CCI400_PMU_EVENT_SOURCE_SHIFT	5
224f4d58938SSuzuki K. Poulose #define CCI400_PMU_EVENT_SOURCE_MASK	0x7
225f4d58938SSuzuki K. Poulose #define CCI400_PMU_EVENT_CODE_SHIFT	0
226f4d58938SSuzuki K. Poulose #define CCI400_PMU_EVENT_CODE_MASK	0x1f
227f4d58938SSuzuki K. Poulose #define CCI400_PMU_EVENT_SOURCE(event) \
228f4d58938SSuzuki K. Poulose 	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
229f4d58938SSuzuki K. Poulose 			CCI400_PMU_EVENT_SOURCE_MASK)
230f4d58938SSuzuki K. Poulose #define CCI400_PMU_EVENT_CODE(event) \
231f4d58938SSuzuki K. Poulose 	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
232b91c8f28SPunit Agrawal 
233f4d58938SSuzuki K. Poulose #define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
234f4d58938SSuzuki K. Poulose #define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
235f4d58938SSuzuki K. Poulose #define CCI400_R0_MASTER_PORT_MIN_EV	0x14
236f4d58938SSuzuki K. Poulose #define CCI400_R0_MASTER_PORT_MAX_EV	0x1a
237f4d58938SSuzuki K. Poulose 
238f4d58938SSuzuki K. Poulose #define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
239f4d58938SSuzuki K. Poulose #define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
240f4d58938SSuzuki K. Poulose #define CCI400_R1_MASTER_PORT_MIN_EV	0x00
241f4d58938SSuzuki K. Poulose #define CCI400_R1_MASTER_PORT_MAX_EV	0x11
242b91c8f28SPunit Agrawal 
243e14cfad3SSuzuki K. Poulose #define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
244e14cfad3SSuzuki K. Poulose 	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
245e14cfad3SSuzuki K. Poulose 					(unsigned long)_config)
246e14cfad3SSuzuki K. Poulose 
247e14cfad3SSuzuki K. Poulose static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
248e14cfad3SSuzuki K. Poulose 			struct device_attribute *attr, char *buf);
249e14cfad3SSuzuki K. Poulose 
2505e442ebaSMark Rutland static struct attribute *cci400_pmu_format_attrs[] = {
251e14cfad3SSuzuki K. Poulose 	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
252e14cfad3SSuzuki K. Poulose 	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
2535e442ebaSMark Rutland 	NULL
254e14cfad3SSuzuki K. Poulose };
255e14cfad3SSuzuki K. Poulose 
2565e442ebaSMark Rutland static struct attribute *cci400_r0_pmu_event_attrs[] = {
257e14cfad3SSuzuki K. Poulose 	/* Slave events */
258e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
259e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
260e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
261e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
262e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
263e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
264e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
265e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
266e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
267e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
268e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
269e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
270e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
271e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
272e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
273e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
274e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
275e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
276e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
277e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
278e14cfad3SSuzuki K. Poulose 	/* Master events */
279e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
280e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
281e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
282e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
283e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
284e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
285e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
286e14cfad3SSuzuki K. Poulose 	/* Special event for cycles counter */
287e14cfad3SSuzuki K. Poulose 	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
2885e442ebaSMark Rutland 	NULL
289e14cfad3SSuzuki K. Poulose };
290e14cfad3SSuzuki K. Poulose 
2915e442ebaSMark Rutland static struct attribute *cci400_r1_pmu_event_attrs[] = {
292e14cfad3SSuzuki K. Poulose 	/* Slave events */
293e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
294e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
295e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
296e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
297e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
298e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
299e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
300e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
301e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
302e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
303e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
304e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
305e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
306e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
307e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
308e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
309e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
310e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
311e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
312e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
313e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
314e14cfad3SSuzuki K. Poulose 	/* Master events */
315e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
316e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
317e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
318e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
319e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
320e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
321e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
322e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
323e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
324e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
325e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
326e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
327e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
328e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
329e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
330e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
331e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
332e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
333e14cfad3SSuzuki K. Poulose 	/* Special event for cycles counter */
334e14cfad3SSuzuki K. Poulose 	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
3355e442ebaSMark Rutland 	NULL
336e14cfad3SSuzuki K. Poulose };
337e14cfad3SSuzuki K. Poulose 
338e14cfad3SSuzuki K. Poulose static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
339e14cfad3SSuzuki K. Poulose 			struct device_attribute *attr, char *buf)
340e14cfad3SSuzuki K. Poulose {
341e14cfad3SSuzuki K. Poulose 	struct dev_ext_attribute *eattr = container_of(attr,
342e14cfad3SSuzuki K. Poulose 				struct dev_ext_attribute, attr);
343e14cfad3SSuzuki K. Poulose 	return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
344e14cfad3SSuzuki K. Poulose }
345e14cfad3SSuzuki K. Poulose 
34631216290SSuzuki K. Poulose static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
34731216290SSuzuki K. Poulose 				struct cci_pmu_hw_events *hw,
34831216290SSuzuki K. Poulose 				unsigned long cci_event)
34931216290SSuzuki K. Poulose {
35031216290SSuzuki K. Poulose 	int idx;
35131216290SSuzuki K. Poulose 
35231216290SSuzuki K. Poulose 	/* cycles event idx is fixed */
353f4d58938SSuzuki K. Poulose 	if (cci_event == CCI400_PMU_CYCLES) {
354f4d58938SSuzuki K. Poulose 		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
35531216290SSuzuki K. Poulose 			return -EAGAIN;
35631216290SSuzuki K. Poulose 
357f4d58938SSuzuki K. Poulose 		return CCI400_PMU_CYCLE_CNTR_IDX;
35831216290SSuzuki K. Poulose 	}
35931216290SSuzuki K. Poulose 
360f4d58938SSuzuki K. Poulose 	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
36131216290SSuzuki K. Poulose 		if (!test_and_set_bit(idx, hw->used_mask))
36231216290SSuzuki K. Poulose 			return idx;
36331216290SSuzuki K. Poulose 
36431216290SSuzuki K. Poulose 	/* No counters available */
36531216290SSuzuki K. Poulose 	return -EAGAIN;
36631216290SSuzuki K. Poulose }
36731216290SSuzuki K. Poulose 
36831216290SSuzuki K. Poulose static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
369b91c8f28SPunit Agrawal {
370f4d58938SSuzuki K. Poulose 	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
371f4d58938SSuzuki K. Poulose 	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
372fc17c839SSuzuki K. Poulose 	int if_type;
373b91c8f28SPunit Agrawal 
374f4d58938SSuzuki K. Poulose 	if (hw_event & ~CCI400_PMU_EVENT_MASK)
375874c5714SSuzuki K. Poulose 		return -ENOENT;
376874c5714SSuzuki K. Poulose 
377f4d58938SSuzuki K. Poulose 	if (hw_event == CCI400_PMU_CYCLES)
37831216290SSuzuki K. Poulose 		return hw_event;
37931216290SSuzuki K. Poulose 
380b91c8f28SPunit Agrawal 	switch (ev_source) {
381f4d58938SSuzuki K. Poulose 	case CCI400_PORT_S0:
382f4d58938SSuzuki K. Poulose 	case CCI400_PORT_S1:
383f4d58938SSuzuki K. Poulose 	case CCI400_PORT_S2:
384f4d58938SSuzuki K. Poulose 	case CCI400_PORT_S3:
385f4d58938SSuzuki K. Poulose 	case CCI400_PORT_S4:
386b91c8f28SPunit Agrawal 		/* Slave Interface */
387fc17c839SSuzuki K. Poulose 		if_type = CCI_IF_SLAVE;
388b91c8f28SPunit Agrawal 		break;
389f4d58938SSuzuki K. Poulose 	case CCI400_PORT_M0:
390f4d58938SSuzuki K. Poulose 	case CCI400_PORT_M1:
391f4d58938SSuzuki K. Poulose 	case CCI400_PORT_M2:
392b91c8f28SPunit Agrawal 		/* Master Interface */
393fc17c839SSuzuki K. Poulose 		if_type = CCI_IF_MASTER;
394b91c8f28SPunit Agrawal 		break;
395fc17c839SSuzuki K. Poulose 	default:
396fc17c839SSuzuki K. Poulose 		return -ENOENT;
397b91c8f28SPunit Agrawal 	}
398b91c8f28SPunit Agrawal 
399a1a076d7SSuzuki K. Poulose 	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
400a1a076d7SSuzuki K. Poulose 		ev_code <= cci_pmu->model->event_ranges[if_type].max)
401fc17c839SSuzuki K. Poulose 		return hw_event;
402fc17c839SSuzuki K. Poulose 
403b91c8f28SPunit Agrawal 	return -ENOENT;
404b91c8f28SPunit Agrawal }
405b91c8f28SPunit Agrawal 
406f4d58938SSuzuki K. Poulose static int probe_cci400_revision(void)
407f6b9e83cSSuzuki K. Poulose {
408f6b9e83cSSuzuki K. Poulose 	int rev;
409f6b9e83cSSuzuki K. Poulose 	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
410f6b9e83cSSuzuki K. Poulose 	rev >>= CCI_PID2_REV_SHIFT;
411f6b9e83cSSuzuki K. Poulose 
412f4d58938SSuzuki K. Poulose 	if (rev < CCI400_R1_PX)
413f4d58938SSuzuki K. Poulose 		return CCI400_R0;
414f6b9e83cSSuzuki K. Poulose 	else
415f4d58938SSuzuki K. Poulose 		return CCI400_R1;
416f6b9e83cSSuzuki K. Poulose }
417f6b9e83cSSuzuki K. Poulose 
418fc17c839SSuzuki K. Poulose static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
419f6b9e83cSSuzuki K. Poulose {
420772742a6SSuzuki K. Poulose 	if (platform_has_secure_cci_access())
421f4d58938SSuzuki K. Poulose 		return &cci_pmu_models[probe_cci400_revision()];
422772742a6SSuzuki K. Poulose 	return NULL;
423f6b9e83cSSuzuki K. Poulose }
424f4d58938SSuzuki K. Poulose #else	/* !CONFIG_ARM_CCI400_PMU */
425f4d58938SSuzuki K. Poulose static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
426f4d58938SSuzuki K. Poulose {
427f4d58938SSuzuki K. Poulose 	return NULL;
428f4d58938SSuzuki K. Poulose }
429f4d58938SSuzuki K. Poulose #endif	/* CONFIG_ARM_CCI400_PMU */
430f6b9e83cSSuzuki K. Poulose 
4313d2e8701SSuzuki K Poulose #ifdef CONFIG_ARM_CCI5xx_PMU
432a95791efSSuzuki K. Poulose 
433a95791efSSuzuki K. Poulose /*
4343d2e8701SSuzuki K Poulose  * CCI5xx PMU event id is an 9-bit value made of two parts.
435a95791efSSuzuki K. Poulose  *	 bits [8:5] - Source for the event
436a95791efSSuzuki K. Poulose  *	 bits [4:0] - Event code (specific to type of interface)
4373d2e8701SSuzuki K Poulose  *
4383d2e8701SSuzuki K Poulose  *
439a95791efSSuzuki K. Poulose  */
440a95791efSSuzuki K. Poulose 
441a95791efSSuzuki K. Poulose /* Port ids */
4423d2e8701SSuzuki K Poulose #define CCI5xx_PORT_S0			0x0
4433d2e8701SSuzuki K Poulose #define CCI5xx_PORT_S1			0x1
4443d2e8701SSuzuki K Poulose #define CCI5xx_PORT_S2			0x2
4453d2e8701SSuzuki K Poulose #define CCI5xx_PORT_S3			0x3
4463d2e8701SSuzuki K Poulose #define CCI5xx_PORT_S4			0x4
4473d2e8701SSuzuki K Poulose #define CCI5xx_PORT_S5			0x5
4483d2e8701SSuzuki K Poulose #define CCI5xx_PORT_S6			0x6
449a95791efSSuzuki K. Poulose 
4503d2e8701SSuzuki K Poulose #define CCI5xx_PORT_M0			0x8
4513d2e8701SSuzuki K Poulose #define CCI5xx_PORT_M1			0x9
4523d2e8701SSuzuki K Poulose #define CCI5xx_PORT_M2			0xa
4533d2e8701SSuzuki K Poulose #define CCI5xx_PORT_M3			0xb
4543d2e8701SSuzuki K Poulose #define CCI5xx_PORT_M4			0xc
4553d2e8701SSuzuki K Poulose #define CCI5xx_PORT_M5			0xd
456d7dd5fd7SSuzuki K Poulose #define CCI5xx_PORT_M6			0xe
457a95791efSSuzuki K. Poulose 
4583d2e8701SSuzuki K Poulose #define CCI5xx_PORT_GLOBAL		0xf
459a95791efSSuzuki K. Poulose 
4603d2e8701SSuzuki K Poulose #define CCI5xx_PMU_EVENT_MASK		0x1ffUL
4613d2e8701SSuzuki K Poulose #define CCI5xx_PMU_EVENT_SOURCE_SHIFT	0x5
4623d2e8701SSuzuki K Poulose #define CCI5xx_PMU_EVENT_SOURCE_MASK	0xf
4633d2e8701SSuzuki K Poulose #define CCI5xx_PMU_EVENT_CODE_SHIFT	0x0
4643d2e8701SSuzuki K Poulose #define CCI5xx_PMU_EVENT_CODE_MASK	0x1f
465a95791efSSuzuki K. Poulose 
4663d2e8701SSuzuki K Poulose #define CCI5xx_PMU_EVENT_SOURCE(event)	\
4673d2e8701SSuzuki K Poulose 	((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
4683d2e8701SSuzuki K Poulose #define CCI5xx_PMU_EVENT_CODE(event)	\
4693d2e8701SSuzuki K Poulose 	((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
470a95791efSSuzuki K. Poulose 
4713d2e8701SSuzuki K Poulose #define CCI5xx_SLAVE_PORT_MIN_EV	0x00
4723d2e8701SSuzuki K Poulose #define CCI5xx_SLAVE_PORT_MAX_EV	0x1f
4733d2e8701SSuzuki K Poulose #define CCI5xx_MASTER_PORT_MIN_EV	0x00
4743d2e8701SSuzuki K Poulose #define CCI5xx_MASTER_PORT_MAX_EV	0x06
4753d2e8701SSuzuki K Poulose #define CCI5xx_GLOBAL_PORT_MIN_EV	0x00
4763d2e8701SSuzuki K Poulose #define CCI5xx_GLOBAL_PORT_MAX_EV	0x0f
477a95791efSSuzuki K. Poulose 
478e14cfad3SSuzuki K. Poulose 
4793d2e8701SSuzuki K Poulose #define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
4803d2e8701SSuzuki K Poulose 	CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
481e14cfad3SSuzuki K. Poulose 					(unsigned long) _config)
482e14cfad3SSuzuki K. Poulose 
4833d2e8701SSuzuki K Poulose static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
484e14cfad3SSuzuki K. Poulose 				struct device_attribute *attr, char *buf);
485e14cfad3SSuzuki K. Poulose 
4863d2e8701SSuzuki K Poulose static struct attribute *cci5xx_pmu_format_attrs[] = {
487e14cfad3SSuzuki K. Poulose 	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
488e14cfad3SSuzuki K. Poulose 	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
4895e442ebaSMark Rutland 	NULL,
490e14cfad3SSuzuki K. Poulose };
491e14cfad3SSuzuki K. Poulose 
4923d2e8701SSuzuki K Poulose static struct attribute *cci5xx_pmu_event_attrs[] = {
493e14cfad3SSuzuki K. Poulose 	/* Slave events */
494e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
495e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
496e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
497e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
498e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
499e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
500e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
501e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
502e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
503e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
504e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
505e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
506e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
507e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
508e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
509e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
510e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
511e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
512e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
513e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
514e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
515e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
516e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
517e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
518e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
519e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
520e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
521e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
522e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
523e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
524e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
525e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),
526e14cfad3SSuzuki K. Poulose 
527e14cfad3SSuzuki K. Poulose 	/* Master events */
528e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
529e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
530e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
531e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
532e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
533e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
534e14cfad3SSuzuki K. Poulose 	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
535e14cfad3SSuzuki K. Poulose 
536e14cfad3SSuzuki K. Poulose 	/* Global events */
5373d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
5383d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
5393d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
5403d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
5413d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
5423d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
5433d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
5443d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
5453d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
5463d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
5473d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
5483d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
5493d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
5503d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
5511d3ef9c2SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
5523d2e8701SSuzuki K Poulose 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
5535e442ebaSMark Rutland 	NULL
554e14cfad3SSuzuki K. Poulose };
555e14cfad3SSuzuki K. Poulose 
5563d2e8701SSuzuki K Poulose static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
557e14cfad3SSuzuki K. Poulose 				struct device_attribute *attr, char *buf)
558e14cfad3SSuzuki K. Poulose {
559e14cfad3SSuzuki K. Poulose 	struct dev_ext_attribute *eattr = container_of(attr,
560e14cfad3SSuzuki K. Poulose 					struct dev_ext_attribute, attr);
561e14cfad3SSuzuki K. Poulose 	/* Global events have single fixed source code */
562e14cfad3SSuzuki K. Poulose 	return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
5633d2e8701SSuzuki K Poulose 				(unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
564e14cfad3SSuzuki K. Poulose }
565e14cfad3SSuzuki K. Poulose 
5663d2e8701SSuzuki K Poulose /*
5673d2e8701SSuzuki K Poulose  * CCI500 provides 8 independent event counters that can count
5683d2e8701SSuzuki K Poulose  * any of the events available.
5693d2e8701SSuzuki K Poulose  * CCI500 PMU event source ids
5703d2e8701SSuzuki K Poulose  *	0x0-0x6 - Slave interfaces
5713d2e8701SSuzuki K Poulose  *	0x8-0xD - Master interfaces
5723d2e8701SSuzuki K Poulose  *	0xf     - Global Events
5733d2e8701SSuzuki K Poulose  *	0x7,0xe - Reserved
5743d2e8701SSuzuki K Poulose  */
575a95791efSSuzuki K. Poulose static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
576a95791efSSuzuki K. Poulose 					unsigned long hw_event)
577a95791efSSuzuki K. Poulose {
5783d2e8701SSuzuki K Poulose 	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
5793d2e8701SSuzuki K Poulose 	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
580a95791efSSuzuki K. Poulose 	int if_type;
581a95791efSSuzuki K. Poulose 
5823d2e8701SSuzuki K Poulose 	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
583a95791efSSuzuki K. Poulose 		return -ENOENT;
584a95791efSSuzuki K. Poulose 
585a95791efSSuzuki K. Poulose 	switch (ev_source) {
5863d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_S0:
5873d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_S1:
5883d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_S2:
5893d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_S3:
5903d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_S4:
5913d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_S5:
5923d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_S6:
593a95791efSSuzuki K. Poulose 		if_type = CCI_IF_SLAVE;
594a95791efSSuzuki K. Poulose 		break;
5953d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_M0:
5963d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_M1:
5973d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_M2:
5983d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_M3:
5993d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_M4:
6003d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_M5:
601a95791efSSuzuki K. Poulose 		if_type = CCI_IF_MASTER;
602a95791efSSuzuki K. Poulose 		break;
6033d2e8701SSuzuki K Poulose 	case CCI5xx_PORT_GLOBAL:
604a95791efSSuzuki K. Poulose 		if_type = CCI_IF_GLOBAL;
605a95791efSSuzuki K. Poulose 		break;
606a95791efSSuzuki K. Poulose 	default:
607a95791efSSuzuki K. Poulose 		return -ENOENT;
608a95791efSSuzuki K. Poulose 	}
609a95791efSSuzuki K. Poulose 
610a95791efSSuzuki K. Poulose 	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
611a95791efSSuzuki K. Poulose 		ev_code <= cci_pmu->model->event_ranges[if_type].max)
612a95791efSSuzuki K. Poulose 		return hw_event;
613a95791efSSuzuki K. Poulose 
614a95791efSSuzuki K. Poulose 	return -ENOENT;
615a95791efSSuzuki K. Poulose }
6163d2e8701SSuzuki K Poulose 
617d7dd5fd7SSuzuki K Poulose /*
618d7dd5fd7SSuzuki K Poulose  * CCI550 provides 8 independent event counters that can count
619d7dd5fd7SSuzuki K Poulose  * any of the events available.
620d7dd5fd7SSuzuki K Poulose  * CCI550 PMU event source ids
621d7dd5fd7SSuzuki K Poulose  *	0x0-0x6 - Slave interfaces
622d7dd5fd7SSuzuki K Poulose  *	0x8-0xe - Master interfaces
623d7dd5fd7SSuzuki K Poulose  *	0xf     - Global Events
624d7dd5fd7SSuzuki K Poulose  *	0x7	- Reserved
625d7dd5fd7SSuzuki K Poulose  */
626d7dd5fd7SSuzuki K Poulose static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
627d7dd5fd7SSuzuki K Poulose 					unsigned long hw_event)
628d7dd5fd7SSuzuki K Poulose {
629d7dd5fd7SSuzuki K Poulose 	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
630d7dd5fd7SSuzuki K Poulose 	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
631d7dd5fd7SSuzuki K Poulose 	int if_type;
632d7dd5fd7SSuzuki K Poulose 
633d7dd5fd7SSuzuki K Poulose 	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
634d7dd5fd7SSuzuki K Poulose 		return -ENOENT;
635d7dd5fd7SSuzuki K Poulose 
636d7dd5fd7SSuzuki K Poulose 	switch (ev_source) {
637d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_S0:
638d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_S1:
639d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_S2:
640d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_S3:
641d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_S4:
642d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_S5:
643d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_S6:
644d7dd5fd7SSuzuki K Poulose 		if_type = CCI_IF_SLAVE;
645d7dd5fd7SSuzuki K Poulose 		break;
646d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_M0:
647d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_M1:
648d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_M2:
649d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_M3:
650d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_M4:
651d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_M5:
652d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_M6:
653d7dd5fd7SSuzuki K Poulose 		if_type = CCI_IF_MASTER;
654d7dd5fd7SSuzuki K Poulose 		break;
655d7dd5fd7SSuzuki K Poulose 	case CCI5xx_PORT_GLOBAL:
656d7dd5fd7SSuzuki K Poulose 		if_type = CCI_IF_GLOBAL;
657d7dd5fd7SSuzuki K Poulose 		break;
658d7dd5fd7SSuzuki K Poulose 	default:
659d7dd5fd7SSuzuki K Poulose 		return -ENOENT;
660d7dd5fd7SSuzuki K Poulose 	}
661d7dd5fd7SSuzuki K Poulose 
662d7dd5fd7SSuzuki K Poulose 	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
663d7dd5fd7SSuzuki K Poulose 		ev_code <= cci_pmu->model->event_ranges[if_type].max)
664d7dd5fd7SSuzuki K Poulose 		return hw_event;
665d7dd5fd7SSuzuki K Poulose 
666d7dd5fd7SSuzuki K Poulose 	return -ENOENT;
667d7dd5fd7SSuzuki K Poulose }
668d7dd5fd7SSuzuki K Poulose 
6693d2e8701SSuzuki K Poulose #endif	/* CONFIG_ARM_CCI5xx_PMU */
670a95791efSSuzuki K. Poulose 
671c66eea5fSSuzuki K Poulose /*
672c66eea5fSSuzuki K Poulose  * Program the CCI PMU counters which have PERF_HES_ARCH set
673c66eea5fSSuzuki K Poulose  * with the event period and mark them ready before we enable
674c66eea5fSSuzuki K Poulose  * PMU.
675c66eea5fSSuzuki K Poulose  */
676ceb49512SWill Deacon static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
677c66eea5fSSuzuki K Poulose {
678c66eea5fSSuzuki K Poulose 	int i;
679c66eea5fSSuzuki K Poulose 	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
680c66eea5fSSuzuki K Poulose 
681c66eea5fSSuzuki K Poulose 	DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
682c66eea5fSSuzuki K Poulose 
683c66eea5fSSuzuki K Poulose 	bitmap_zero(mask, cci_pmu->num_cntrs);
684c66eea5fSSuzuki K Poulose 	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
685c66eea5fSSuzuki K Poulose 		struct perf_event *event = cci_hw->events[i];
686c66eea5fSSuzuki K Poulose 
687c66eea5fSSuzuki K Poulose 		if (WARN_ON(!event))
688c66eea5fSSuzuki K Poulose 			continue;
689c66eea5fSSuzuki K Poulose 
690c66eea5fSSuzuki K Poulose 		/* Leave the events which are not counting */
691c66eea5fSSuzuki K Poulose 		if (event->hw.state & PERF_HES_STOPPED)
692c66eea5fSSuzuki K Poulose 			continue;
693c66eea5fSSuzuki K Poulose 		if (event->hw.state & PERF_HES_ARCH) {
694c66eea5fSSuzuki K Poulose 			set_bit(i, mask);
695c66eea5fSSuzuki K Poulose 			event->hw.state &= ~PERF_HES_ARCH;
696c66eea5fSSuzuki K Poulose 		}
697c66eea5fSSuzuki K Poulose 	}
698c66eea5fSSuzuki K Poulose 
699c66eea5fSSuzuki K Poulose 	pmu_write_counters(cci_pmu, mask);
700c66eea5fSSuzuki K Poulose }
701c66eea5fSSuzuki K Poulose 
702a077c52fSSuzuki K Poulose /* Should be called with cci_pmu->hw_events->pmu_lock held */
70311300027SSuzuki K Poulose static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
704a077c52fSSuzuki K Poulose {
705a077c52fSSuzuki K Poulose 	u32 val;
706a077c52fSSuzuki K Poulose 
707a077c52fSSuzuki K Poulose 	/* Enable all the PMU counters. */
708a077c52fSSuzuki K Poulose 	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
709a077c52fSSuzuki K Poulose 	writel(val, cci_ctrl_base + CCI_PMCR);
710a077c52fSSuzuki K Poulose }
711a077c52fSSuzuki K Poulose 
712a077c52fSSuzuki K Poulose /* Should be called with cci_pmu->hw_events->pmu_lock held */
71311300027SSuzuki K Poulose static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
71411300027SSuzuki K Poulose {
71511300027SSuzuki K Poulose 	cci_pmu_sync_counters(cci_pmu);
71611300027SSuzuki K Poulose 	__cci_pmu_enable_nosync(cci_pmu);
71711300027SSuzuki K Poulose }
71811300027SSuzuki K Poulose 
71911300027SSuzuki K Poulose /* Should be called with cci_pmu->hw_events->pmu_lock held */
720a077c52fSSuzuki K Poulose static void __cci_pmu_disable(void)
721a077c52fSSuzuki K Poulose {
722a077c52fSSuzuki K Poulose 	u32 val;
723a077c52fSSuzuki K Poulose 
724a077c52fSSuzuki K Poulose 	/* Disable all the PMU counters. */
725a077c52fSSuzuki K Poulose 	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
726a077c52fSSuzuki K Poulose 	writel(val, cci_ctrl_base + CCI_PMCR);
727a077c52fSSuzuki K Poulose }
728a077c52fSSuzuki K Poulose 
729e14cfad3SSuzuki K. Poulose static ssize_t cci_pmu_format_show(struct device *dev,
730e14cfad3SSuzuki K. Poulose 			struct device_attribute *attr, char *buf)
731e14cfad3SSuzuki K. Poulose {
732e14cfad3SSuzuki K. Poulose 	struct dev_ext_attribute *eattr = container_of(attr,
733e14cfad3SSuzuki K. Poulose 				struct dev_ext_attribute, attr);
734e14cfad3SSuzuki K. Poulose 	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
735e14cfad3SSuzuki K. Poulose }
736e14cfad3SSuzuki K. Poulose 
737e14cfad3SSuzuki K. Poulose static ssize_t cci_pmu_event_show(struct device *dev,
738e14cfad3SSuzuki K. Poulose 			struct device_attribute *attr, char *buf)
739e14cfad3SSuzuki K. Poulose {
740e14cfad3SSuzuki K. Poulose 	struct dev_ext_attribute *eattr = container_of(attr,
741e14cfad3SSuzuki K. Poulose 				struct dev_ext_attribute, attr);
742e14cfad3SSuzuki K. Poulose 	/* source parameter is mandatory for normal PMU events */
743e14cfad3SSuzuki K. Poulose 	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
744e14cfad3SSuzuki K. Poulose 					 (unsigned long)eattr->var);
745e14cfad3SSuzuki K. Poulose }
746e14cfad3SSuzuki K. Poulose 
747c6f85cb4SMark Rutland static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
748b91c8f28SPunit Agrawal {
749ab5b316dSSuzuki K. Poulose 	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
750b91c8f28SPunit Agrawal }
751b91c8f28SPunit Agrawal 
752a1a076d7SSuzuki K. Poulose static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
753b91c8f28SPunit Agrawal {
754ab5b316dSSuzuki K. Poulose 	return readl_relaxed(cci_pmu->base +
755ab5b316dSSuzuki K. Poulose 			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
756b91c8f28SPunit Agrawal }
757b91c8f28SPunit Agrawal 
758a1a076d7SSuzuki K. Poulose static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
759a1a076d7SSuzuki K. Poulose 			       int idx, unsigned int offset)
760b91c8f28SPunit Agrawal {
7616ec30702SWill Deacon 	writel_relaxed(value, cci_pmu->base +
762ab5b316dSSuzuki K. Poulose 		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
763b91c8f28SPunit Agrawal }
764b91c8f28SPunit Agrawal 
765a1a076d7SSuzuki K. Poulose static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
766b91c8f28SPunit Agrawal {
767a1a076d7SSuzuki K. Poulose 	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
768b91c8f28SPunit Agrawal }
769b91c8f28SPunit Agrawal 
770a1a076d7SSuzuki K. Poulose static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
771b91c8f28SPunit Agrawal {
772a1a076d7SSuzuki K. Poulose 	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
773b91c8f28SPunit Agrawal }
774b91c8f28SPunit Agrawal 
7751ce6311bSSuzuki K Poulose static bool __maybe_unused
7761ce6311bSSuzuki K Poulose pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
7771ce6311bSSuzuki K Poulose {
7781ce6311bSSuzuki K Poulose 	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
7791ce6311bSSuzuki K Poulose }
7801ce6311bSSuzuki K Poulose 
781a1a076d7SSuzuki K. Poulose static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
782b91c8f28SPunit Agrawal {
783a1a076d7SSuzuki K. Poulose 	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
784b91c8f28SPunit Agrawal }
785b91c8f28SPunit Agrawal 
786ab5b316dSSuzuki K. Poulose /*
787cea16f8bSSuzuki K Poulose  * For all counters on the CCI-PMU, disable any 'enabled' counters,
788cea16f8bSSuzuki K Poulose  * saving the changed counters in the mask, so that we can restore
789cea16f8bSSuzuki K Poulose  * it later using pmu_restore_counters. The mask is private to the
790cea16f8bSSuzuki K Poulose  * caller. We cannot rely on the used_mask maintained by the CCI_PMU
791cea16f8bSSuzuki K Poulose  * as it only tells us if the counter is assigned to perf_event or not.
792cea16f8bSSuzuki K Poulose  * The state of the perf_event cannot be locked by the PMU layer, hence
793cea16f8bSSuzuki K Poulose  * we check the individual counter status (which can be locked by
794cea16f8bSSuzuki K Poulose  * cci_pm->hw_events->pmu_lock).
795cea16f8bSSuzuki K Poulose  *
796cea16f8bSSuzuki K Poulose  * @mask should be initialised to empty by the caller.
797cea16f8bSSuzuki K Poulose  */
798cea16f8bSSuzuki K Poulose static void __maybe_unused
799cea16f8bSSuzuki K Poulose pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
800cea16f8bSSuzuki K Poulose {
801cea16f8bSSuzuki K Poulose 	int i;
802cea16f8bSSuzuki K Poulose 
803cea16f8bSSuzuki K Poulose 	for (i = 0; i < cci_pmu->num_cntrs; i++) {
804cea16f8bSSuzuki K Poulose 		if (pmu_counter_is_enabled(cci_pmu, i)) {
805cea16f8bSSuzuki K Poulose 			set_bit(i, mask);
806cea16f8bSSuzuki K Poulose 			pmu_disable_counter(cci_pmu, i);
807cea16f8bSSuzuki K Poulose 		}
808cea16f8bSSuzuki K Poulose 	}
809cea16f8bSSuzuki K Poulose }
810cea16f8bSSuzuki K Poulose 
811cea16f8bSSuzuki K Poulose /*
812cea16f8bSSuzuki K Poulose  * Restore the status of the counters. Reversal of the pmu_save_counters().
813cea16f8bSSuzuki K Poulose  * For each counter set in the mask, enable the counter back.
814cea16f8bSSuzuki K Poulose  */
815cea16f8bSSuzuki K Poulose static void __maybe_unused
816cea16f8bSSuzuki K Poulose pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
817cea16f8bSSuzuki K Poulose {
818cea16f8bSSuzuki K Poulose 	int i;
819cea16f8bSSuzuki K Poulose 
820cea16f8bSSuzuki K Poulose 	for_each_set_bit(i, mask, cci_pmu->num_cntrs)
821cea16f8bSSuzuki K Poulose 		pmu_enable_counter(cci_pmu, i);
822cea16f8bSSuzuki K Poulose }
823cea16f8bSSuzuki K Poulose 
824cea16f8bSSuzuki K Poulose /*
825ab5b316dSSuzuki K. Poulose  * Returns the number of programmable counters actually implemented
826ab5b316dSSuzuki K. Poulose  * by the cci
827ab5b316dSSuzuki K. Poulose  */
828b91c8f28SPunit Agrawal static u32 pmu_get_max_counters(void)
829b91c8f28SPunit Agrawal {
830ab5b316dSSuzuki K. Poulose 	return (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
831b91c8f28SPunit Agrawal 		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
832b91c8f28SPunit Agrawal }
833b91c8f28SPunit Agrawal 
834c6f85cb4SMark Rutland static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
835b91c8f28SPunit Agrawal {
836c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
83731216290SSuzuki K. Poulose 	unsigned long cci_event = event->hw.config_base;
838b91c8f28SPunit Agrawal 	int idx;
839b91c8f28SPunit Agrawal 
84031216290SSuzuki K. Poulose 	if (cci_pmu->model->get_event_idx)
84131216290SSuzuki K. Poulose 		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
842b91c8f28SPunit Agrawal 
84331216290SSuzuki K. Poulose 	/* Generic code to find an unused idx from the mask */
84431216290SSuzuki K. Poulose 	for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
845b91c8f28SPunit Agrawal 		if (!test_and_set_bit(idx, hw->used_mask))
846b91c8f28SPunit Agrawal 			return idx;
847b91c8f28SPunit Agrawal 
848b91c8f28SPunit Agrawal 	/* No counters available */
849b91c8f28SPunit Agrawal 	return -EAGAIN;
850b91c8f28SPunit Agrawal }
851b91c8f28SPunit Agrawal 
852b91c8f28SPunit Agrawal static int pmu_map_event(struct perf_event *event)
853b91c8f28SPunit Agrawal {
85431216290SSuzuki K. Poulose 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
855b91c8f28SPunit Agrawal 
85631216290SSuzuki K. Poulose 	if (event->attr.type < PERF_TYPE_MAX ||
85731216290SSuzuki K. Poulose 			!cci_pmu->model->validate_hw_event)
858b91c8f28SPunit Agrawal 		return -ENOENT;
859b91c8f28SPunit Agrawal 
86031216290SSuzuki K. Poulose 	return	cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
861b91c8f28SPunit Agrawal }
862b91c8f28SPunit Agrawal 
863c6f85cb4SMark Rutland static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
864b91c8f28SPunit Agrawal {
865b91c8f28SPunit Agrawal 	int i;
866b91c8f28SPunit Agrawal 	struct platform_device *pmu_device = cci_pmu->plat_device;
867b91c8f28SPunit Agrawal 
868b91c8f28SPunit Agrawal 	if (unlikely(!pmu_device))
869b91c8f28SPunit Agrawal 		return -ENODEV;
870b91c8f28SPunit Agrawal 
871a1a076d7SSuzuki K. Poulose 	if (cci_pmu->nr_irqs < 1) {
872b91c8f28SPunit Agrawal 		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
873b91c8f28SPunit Agrawal 		return -ENODEV;
874b91c8f28SPunit Agrawal 	}
875b91c8f28SPunit Agrawal 
876b91c8f28SPunit Agrawal 	/*
877b91c8f28SPunit Agrawal 	 * Register all available CCI PMU interrupts. In the interrupt handler
878b91c8f28SPunit Agrawal 	 * we iterate over the counters checking for interrupt source (the
879b91c8f28SPunit Agrawal 	 * overflowing counter) and clear it.
880b91c8f28SPunit Agrawal 	 *
881b91c8f28SPunit Agrawal 	 * This should allow handling of non-unique interrupt for the counters.
882b91c8f28SPunit Agrawal 	 */
883a1a076d7SSuzuki K. Poulose 	for (i = 0; i < cci_pmu->nr_irqs; i++) {
884a1a076d7SSuzuki K. Poulose 		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
885b91c8f28SPunit Agrawal 				"arm-cci-pmu", cci_pmu);
886b91c8f28SPunit Agrawal 		if (err) {
887b91c8f28SPunit Agrawal 			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
888a1a076d7SSuzuki K. Poulose 				cci_pmu->irqs[i]);
889b91c8f28SPunit Agrawal 			return err;
890b91c8f28SPunit Agrawal 		}
891b91c8f28SPunit Agrawal 
892a1a076d7SSuzuki K. Poulose 		set_bit(i, &cci_pmu->active_irqs);
893b91c8f28SPunit Agrawal 	}
894b91c8f28SPunit Agrawal 
895b91c8f28SPunit Agrawal 	return 0;
896b91c8f28SPunit Agrawal }
897b91c8f28SPunit Agrawal 
898c6f85cb4SMark Rutland static void pmu_free_irq(struct cci_pmu *cci_pmu)
899c6f85cb4SMark Rutland {
900c6f85cb4SMark Rutland 	int i;
901c6f85cb4SMark Rutland 
902a1a076d7SSuzuki K. Poulose 	for (i = 0; i < cci_pmu->nr_irqs; i++) {
903a1a076d7SSuzuki K. Poulose 		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
904c6f85cb4SMark Rutland 			continue;
905c6f85cb4SMark Rutland 
906a1a076d7SSuzuki K. Poulose 		free_irq(cci_pmu->irqs[i], cci_pmu);
907c6f85cb4SMark Rutland 	}
908c6f85cb4SMark Rutland }
909c6f85cb4SMark Rutland 
910c6f85cb4SMark Rutland static u32 pmu_read_counter(struct perf_event *event)
911c6f85cb4SMark Rutland {
912c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
913c6f85cb4SMark Rutland 	struct hw_perf_event *hw_counter = &event->hw;
914c6f85cb4SMark Rutland 	int idx = hw_counter->idx;
915c6f85cb4SMark Rutland 	u32 value;
916c6f85cb4SMark Rutland 
917c6f85cb4SMark Rutland 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
918c6f85cb4SMark Rutland 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
919c6f85cb4SMark Rutland 		return 0;
920c6f85cb4SMark Rutland 	}
921a1a076d7SSuzuki K. Poulose 	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
922c6f85cb4SMark Rutland 
923c6f85cb4SMark Rutland 	return value;
924c6f85cb4SMark Rutland }
925c6f85cb4SMark Rutland 
926c8bc2b11SSuzuki K Poulose static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
927c6f85cb4SMark Rutland {
928a1a076d7SSuzuki K. Poulose 	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
929c6f85cb4SMark Rutland }
930c6f85cb4SMark Rutland 
931fff3f1a0SSuzuki K Poulose static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
932a53eb5c6SSuzuki K Poulose {
933a53eb5c6SSuzuki K Poulose 	int i;
934a53eb5c6SSuzuki K Poulose 	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
935a53eb5c6SSuzuki K Poulose 
936a53eb5c6SSuzuki K Poulose 	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
937a53eb5c6SSuzuki K Poulose 		struct perf_event *event = cci_hw->events[i];
938a53eb5c6SSuzuki K Poulose 
939a53eb5c6SSuzuki K Poulose 		if (WARN_ON(!event))
940a53eb5c6SSuzuki K Poulose 			continue;
941c8bc2b11SSuzuki K Poulose 		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
942a53eb5c6SSuzuki K Poulose 	}
943a53eb5c6SSuzuki K Poulose }
944a53eb5c6SSuzuki K Poulose 
945fff3f1a0SSuzuki K Poulose static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
946fff3f1a0SSuzuki K Poulose {
947fff3f1a0SSuzuki K Poulose 	if (cci_pmu->model->write_counters)
948fff3f1a0SSuzuki K Poulose 		cci_pmu->model->write_counters(cci_pmu, mask);
949fff3f1a0SSuzuki K Poulose 	else
950fff3f1a0SSuzuki K Poulose 		__pmu_write_counters(cci_pmu, mask);
951fff3f1a0SSuzuki K Poulose }
952fff3f1a0SSuzuki K Poulose 
9533d2e8701SSuzuki K Poulose #ifdef CONFIG_ARM_CCI5xx_PMU
954a445fcc9SSuzuki K Poulose 
955a445fcc9SSuzuki K Poulose /*
956d7dd5fd7SSuzuki K Poulose  * CCI-500/CCI-550 has advanced power saving policies, which could gate the
957a445fcc9SSuzuki K Poulose  * clocks to the PMU counters, which makes the writes to them ineffective.
958a445fcc9SSuzuki K Poulose  * The only way to write to those counters is when the global counters
959a445fcc9SSuzuki K Poulose  * are enabled and the particular counter is enabled.
960a445fcc9SSuzuki K Poulose  *
961a445fcc9SSuzuki K Poulose  * So we do the following :
962a445fcc9SSuzuki K Poulose  *
963a445fcc9SSuzuki K Poulose  * 1) Disable all the PMU counters, saving their current state
964a445fcc9SSuzuki K Poulose  * 2) Enable the global PMU profiling, now that all counters are
965a445fcc9SSuzuki K Poulose  *    disabled.
966a445fcc9SSuzuki K Poulose  *
967a445fcc9SSuzuki K Poulose  * For each counter to be programmed, repeat steps 3-7:
968a445fcc9SSuzuki K Poulose  *
969a445fcc9SSuzuki K Poulose  * 3) Write an invalid event code to the event control register for the
970a445fcc9SSuzuki K Poulose       counter, so that the counters are not modified.
971a445fcc9SSuzuki K Poulose  * 4) Enable the counter control for the counter.
972a445fcc9SSuzuki K Poulose  * 5) Set the counter value
973a445fcc9SSuzuki K Poulose  * 6) Disable the counter
974a445fcc9SSuzuki K Poulose  * 7) Restore the event in the target counter
975a445fcc9SSuzuki K Poulose  *
976a445fcc9SSuzuki K Poulose  * 8) Disable the global PMU.
977a445fcc9SSuzuki K Poulose  * 9) Restore the status of the rest of the counters.
978a445fcc9SSuzuki K Poulose  *
9793d2e8701SSuzuki K Poulose  * We choose an event which for CCI-5xx is guaranteed not to count.
980a445fcc9SSuzuki K Poulose  * We use the highest possible event code (0x1f) for the master interface 0.
981a445fcc9SSuzuki K Poulose  */
9823d2e8701SSuzuki K Poulose #define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
9833d2e8701SSuzuki K Poulose 				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
9843d2e8701SSuzuki K Poulose static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
985a445fcc9SSuzuki K Poulose {
986a445fcc9SSuzuki K Poulose 	int i;
987a445fcc9SSuzuki K Poulose 	DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
988a445fcc9SSuzuki K Poulose 
989a445fcc9SSuzuki K Poulose 	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
990a445fcc9SSuzuki K Poulose 	pmu_save_counters(cci_pmu, saved_mask);
991a445fcc9SSuzuki K Poulose 
992a445fcc9SSuzuki K Poulose 	/*
993a445fcc9SSuzuki K Poulose 	 * Now that all the counters are disabled, we can safely turn the PMU on,
994a445fcc9SSuzuki K Poulose 	 * without syncing the status of the counters
995a445fcc9SSuzuki K Poulose 	 */
996a445fcc9SSuzuki K Poulose 	__cci_pmu_enable_nosync(cci_pmu);
997a445fcc9SSuzuki K Poulose 
998a445fcc9SSuzuki K Poulose 	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
999a445fcc9SSuzuki K Poulose 		struct perf_event *event = cci_pmu->hw_events.events[i];
1000a445fcc9SSuzuki K Poulose 
1001a445fcc9SSuzuki K Poulose 		if (WARN_ON(!event))
1002a445fcc9SSuzuki K Poulose 			continue;
1003a445fcc9SSuzuki K Poulose 
10043d2e8701SSuzuki K Poulose 		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
1005a445fcc9SSuzuki K Poulose 		pmu_enable_counter(cci_pmu, i);
1006a445fcc9SSuzuki K Poulose 		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
1007a445fcc9SSuzuki K Poulose 		pmu_disable_counter(cci_pmu, i);
1008a445fcc9SSuzuki K Poulose 		pmu_set_event(cci_pmu, i, event->hw.config_base);
1009a445fcc9SSuzuki K Poulose 	}
1010a445fcc9SSuzuki K Poulose 
1011a445fcc9SSuzuki K Poulose 	__cci_pmu_disable();
1012a445fcc9SSuzuki K Poulose 
1013a445fcc9SSuzuki K Poulose 	pmu_restore_counters(cci_pmu, saved_mask);
1014a445fcc9SSuzuki K Poulose }
1015a445fcc9SSuzuki K Poulose 
10163d2e8701SSuzuki K Poulose #endif	/* CONFIG_ARM_CCI5xx_PMU */
1017a445fcc9SSuzuki K Poulose 
1018c6f85cb4SMark Rutland static u64 pmu_event_update(struct perf_event *event)
1019c6f85cb4SMark Rutland {
1020c6f85cb4SMark Rutland 	struct hw_perf_event *hwc = &event->hw;
1021c6f85cb4SMark Rutland 	u64 delta, prev_raw_count, new_raw_count;
1022c6f85cb4SMark Rutland 
1023c6f85cb4SMark Rutland 	do {
1024c6f85cb4SMark Rutland 		prev_raw_count = local64_read(&hwc->prev_count);
1025c6f85cb4SMark Rutland 		new_raw_count = pmu_read_counter(event);
1026c6f85cb4SMark Rutland 	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
1027c6f85cb4SMark Rutland 		 new_raw_count) != prev_raw_count);
1028c6f85cb4SMark Rutland 
1029c6f85cb4SMark Rutland 	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
1030c6f85cb4SMark Rutland 
1031c6f85cb4SMark Rutland 	local64_add(delta, &event->count);
1032c6f85cb4SMark Rutland 
1033c6f85cb4SMark Rutland 	return new_raw_count;
1034c6f85cb4SMark Rutland }
1035c6f85cb4SMark Rutland 
1036c6f85cb4SMark Rutland static void pmu_read(struct perf_event *event)
1037c6f85cb4SMark Rutland {
1038c6f85cb4SMark Rutland 	pmu_event_update(event);
1039c6f85cb4SMark Rutland }
1040c6f85cb4SMark Rutland 
1041ceb49512SWill Deacon static void pmu_event_set_period(struct perf_event *event)
1042c6f85cb4SMark Rutland {
1043c6f85cb4SMark Rutland 	struct hw_perf_event *hwc = &event->hw;
1044c6f85cb4SMark Rutland 	/*
1045c6f85cb4SMark Rutland 	 * The CCI PMU counters have a period of 2^32. To account for the
1046c6f85cb4SMark Rutland 	 * possiblity of extreme interrupt latency we program for a period of
1047c6f85cb4SMark Rutland 	 * half that. Hopefully we can handle the interrupt before another 2^31
1048c6f85cb4SMark Rutland 	 * events occur and the counter overtakes its previous value.
1049c6f85cb4SMark Rutland 	 */
1050c6f85cb4SMark Rutland 	u64 val = 1ULL << 31;
1051c6f85cb4SMark Rutland 	local64_set(&hwc->prev_count, val);
1052c66eea5fSSuzuki K Poulose 
1053c66eea5fSSuzuki K Poulose 	/*
1054c66eea5fSSuzuki K Poulose 	 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
1055c66eea5fSSuzuki K Poulose 	 * values needs to be sync-ed with the s/w state before the PMU is
1056c66eea5fSSuzuki K Poulose 	 * enabled.
1057c66eea5fSSuzuki K Poulose 	 * Mark this counter for sync.
1058c66eea5fSSuzuki K Poulose 	 */
1059c66eea5fSSuzuki K Poulose 	hwc->state |= PERF_HES_ARCH;
1060c6f85cb4SMark Rutland }
1061c6f85cb4SMark Rutland 
1062b91c8f28SPunit Agrawal static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
1063b91c8f28SPunit Agrawal {
1064b91c8f28SPunit Agrawal 	unsigned long flags;
1065c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = dev;
1066a1a076d7SSuzuki K. Poulose 	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
1067b91c8f28SPunit Agrawal 	int idx, handled = IRQ_NONE;
1068b91c8f28SPunit Agrawal 
1069b91c8f28SPunit Agrawal 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1070c66eea5fSSuzuki K Poulose 
1071c66eea5fSSuzuki K Poulose 	/* Disable the PMU while we walk through the counters */
1072c66eea5fSSuzuki K Poulose 	__cci_pmu_disable();
1073b91c8f28SPunit Agrawal 	/*
1074b91c8f28SPunit Agrawal 	 * Iterate over counters and update the corresponding perf events.
1075b91c8f28SPunit Agrawal 	 * This should work regardless of whether we have per-counter overflow
1076b91c8f28SPunit Agrawal 	 * interrupt or a combined overflow interrupt.
1077b91c8f28SPunit Agrawal 	 */
107831216290SSuzuki K. Poulose 	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
1079b91c8f28SPunit Agrawal 		struct perf_event *event = events->events[idx];
1080b91c8f28SPunit Agrawal 
1081b91c8f28SPunit Agrawal 		if (!event)
1082b91c8f28SPunit Agrawal 			continue;
1083b91c8f28SPunit Agrawal 
1084b91c8f28SPunit Agrawal 		/* Did this counter overflow? */
1085a1a076d7SSuzuki K. Poulose 		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
1086fc5130deSHimangi Saraogi 		      CCI_PMU_OVRFLW_FLAG))
1087b91c8f28SPunit Agrawal 			continue;
1088b91c8f28SPunit Agrawal 
1089a1a076d7SSuzuki K. Poulose 		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
1090a1a076d7SSuzuki K. Poulose 							CCI_PMU_OVRFLW);
1091b91c8f28SPunit Agrawal 
1092c6f85cb4SMark Rutland 		pmu_event_update(event);
1093c6f85cb4SMark Rutland 		pmu_event_set_period(event);
1094b91c8f28SPunit Agrawal 		handled = IRQ_HANDLED;
1095b91c8f28SPunit Agrawal 	}
1096c66eea5fSSuzuki K Poulose 
1097c66eea5fSSuzuki K Poulose 	/* Enable the PMU and sync possibly overflowed counters */
109811300027SSuzuki K Poulose 	__cci_pmu_enable_sync(cci_pmu);
1099b91c8f28SPunit Agrawal 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1100b91c8f28SPunit Agrawal 
1101b91c8f28SPunit Agrawal 	return IRQ_RETVAL(handled);
1102b91c8f28SPunit Agrawal }
1103b91c8f28SPunit Agrawal 
1104c6f85cb4SMark Rutland static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
1105b91c8f28SPunit Agrawal {
1106c6f85cb4SMark Rutland 	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
1107c6f85cb4SMark Rutland 	if (ret) {
1108c6f85cb4SMark Rutland 		pmu_free_irq(cci_pmu);
1109c6f85cb4SMark Rutland 		return ret;
1110c6f85cb4SMark Rutland 	}
1111c6f85cb4SMark Rutland 	return 0;
1112c6f85cb4SMark Rutland }
1113b91c8f28SPunit Agrawal 
1114c6f85cb4SMark Rutland static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
1115c6f85cb4SMark Rutland {
1116c6f85cb4SMark Rutland 	pmu_free_irq(cci_pmu);
1117c6f85cb4SMark Rutland }
1118b91c8f28SPunit Agrawal 
1119c6f85cb4SMark Rutland static void hw_perf_event_destroy(struct perf_event *event)
1120c6f85cb4SMark Rutland {
1121c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1122c6f85cb4SMark Rutland 	atomic_t *active_events = &cci_pmu->active_events;
1123c6f85cb4SMark Rutland 	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
1124c6f85cb4SMark Rutland 
1125c6f85cb4SMark Rutland 	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
1126c6f85cb4SMark Rutland 		cci_pmu_put_hw(cci_pmu);
1127c6f85cb4SMark Rutland 		mutex_unlock(reserve_mutex);
1128b91c8f28SPunit Agrawal 	}
1129b91c8f28SPunit Agrawal }
1130b91c8f28SPunit Agrawal 
1131c6f85cb4SMark Rutland static void cci_pmu_enable(struct pmu *pmu)
1132b91c8f28SPunit Agrawal {
1133c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1134c6f85cb4SMark Rutland 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1135ab5b316dSSuzuki K. Poulose 	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
1136b91c8f28SPunit Agrawal 	unsigned long flags;
1137b91c8f28SPunit Agrawal 
1138c6f85cb4SMark Rutland 	if (!enabled)
1139c6f85cb4SMark Rutland 		return;
1140c6f85cb4SMark Rutland 
1141c6f85cb4SMark Rutland 	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
114211300027SSuzuki K Poulose 	__cci_pmu_enable_sync(cci_pmu);
1143c6f85cb4SMark Rutland 	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1144b91c8f28SPunit Agrawal 
1145b91c8f28SPunit Agrawal }
1146b91c8f28SPunit Agrawal 
1147c6f85cb4SMark Rutland static void cci_pmu_disable(struct pmu *pmu)
1148b91c8f28SPunit Agrawal {
1149c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1150c6f85cb4SMark Rutland 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1151b91c8f28SPunit Agrawal 	unsigned long flags;
1152b91c8f28SPunit Agrawal 
1153c6f85cb4SMark Rutland 	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1154a077c52fSSuzuki K Poulose 	__cci_pmu_disable();
1155c6f85cb4SMark Rutland 	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1156b91c8f28SPunit Agrawal }
1157b91c8f28SPunit Agrawal 
115831216290SSuzuki K. Poulose /*
115931216290SSuzuki K. Poulose  * Check if the idx represents a non-programmable counter.
116031216290SSuzuki K. Poulose  * All the fixed event counters are mapped before the programmable
116131216290SSuzuki K. Poulose  * counters.
116231216290SSuzuki K. Poulose  */
116331216290SSuzuki K. Poulose static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
116431216290SSuzuki K. Poulose {
116531216290SSuzuki K. Poulose 	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
116631216290SSuzuki K. Poulose }
116731216290SSuzuki K. Poulose 
1168c6f85cb4SMark Rutland static void cci_pmu_start(struct perf_event *event, int pmu_flags)
1169b91c8f28SPunit Agrawal {
1170c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1171c6f85cb4SMark Rutland 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1172c6f85cb4SMark Rutland 	struct hw_perf_event *hwc = &event->hw;
1173c6f85cb4SMark Rutland 	int idx = hwc->idx;
1174c6f85cb4SMark Rutland 	unsigned long flags;
1175c6f85cb4SMark Rutland 
1176c6f85cb4SMark Rutland 	/*
1177c6f85cb4SMark Rutland 	 * To handle interrupt latency, we always reprogram the period
1178c6f85cb4SMark Rutland 	 * regardlesss of PERF_EF_RELOAD.
1179c6f85cb4SMark Rutland 	 */
1180c6f85cb4SMark Rutland 	if (pmu_flags & PERF_EF_RELOAD)
1181c6f85cb4SMark Rutland 		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
1182c6f85cb4SMark Rutland 
1183c6f85cb4SMark Rutland 	hwc->state = 0;
1184b91c8f28SPunit Agrawal 
1185b91c8f28SPunit Agrawal 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1186b91c8f28SPunit Agrawal 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1187c6f85cb4SMark Rutland 		return;
1188c6f85cb4SMark Rutland 	}
1189c6f85cb4SMark Rutland 
1190c6f85cb4SMark Rutland 	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
1191c6f85cb4SMark Rutland 
119231216290SSuzuki K. Poulose 	/* Configure the counter unless you are counting a fixed event */
119331216290SSuzuki K. Poulose 	if (!pmu_fixed_hw_idx(cci_pmu, idx))
1194a1a076d7SSuzuki K. Poulose 		pmu_set_event(cci_pmu, idx, hwc->config_base);
1195c6f85cb4SMark Rutland 
1196c6f85cb4SMark Rutland 	pmu_event_set_period(event);
1197a1a076d7SSuzuki K. Poulose 	pmu_enable_counter(cci_pmu, idx);
1198c6f85cb4SMark Rutland 
1199c6f85cb4SMark Rutland 	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
1200c6f85cb4SMark Rutland }
1201c6f85cb4SMark Rutland 
1202c6f85cb4SMark Rutland static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
1203c6f85cb4SMark Rutland {
1204c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1205c6f85cb4SMark Rutland 	struct hw_perf_event *hwc = &event->hw;
1206c6f85cb4SMark Rutland 	int idx = hwc->idx;
1207c6f85cb4SMark Rutland 
1208c6f85cb4SMark Rutland 	if (hwc->state & PERF_HES_STOPPED)
1209c6f85cb4SMark Rutland 		return;
1210c6f85cb4SMark Rutland 
1211c6f85cb4SMark Rutland 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
1212c6f85cb4SMark Rutland 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
1213c6f85cb4SMark Rutland 		return;
1214c6f85cb4SMark Rutland 	}
1215c6f85cb4SMark Rutland 
1216c6f85cb4SMark Rutland 	/*
1217c6f85cb4SMark Rutland 	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
1218c6f85cb4SMark Rutland 	 * cci_pmu_start()
1219c6f85cb4SMark Rutland 	 */
1220a1a076d7SSuzuki K. Poulose 	pmu_disable_counter(cci_pmu, idx);
1221c6f85cb4SMark Rutland 	pmu_event_update(event);
1222c6f85cb4SMark Rutland 	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
1223c6f85cb4SMark Rutland }
1224c6f85cb4SMark Rutland 
1225c6f85cb4SMark Rutland static int cci_pmu_add(struct perf_event *event, int flags)
1226c6f85cb4SMark Rutland {
1227c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1228c6f85cb4SMark Rutland 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1229c6f85cb4SMark Rutland 	struct hw_perf_event *hwc = &event->hw;
1230c6f85cb4SMark Rutland 	int idx;
1231c6f85cb4SMark Rutland 	int err = 0;
1232c6f85cb4SMark Rutland 
1233c6f85cb4SMark Rutland 	perf_pmu_disable(event->pmu);
1234c6f85cb4SMark Rutland 
1235c6f85cb4SMark Rutland 	/* If we don't have a space for the counter then finish early. */
1236c6f85cb4SMark Rutland 	idx = pmu_get_event_idx(hw_events, event);
1237c6f85cb4SMark Rutland 	if (idx < 0) {
1238c6f85cb4SMark Rutland 		err = idx;
1239c6f85cb4SMark Rutland 		goto out;
1240c6f85cb4SMark Rutland 	}
1241c6f85cb4SMark Rutland 
1242c6f85cb4SMark Rutland 	event->hw.idx = idx;
1243c6f85cb4SMark Rutland 	hw_events->events[idx] = event;
1244c6f85cb4SMark Rutland 
1245c6f85cb4SMark Rutland 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
1246c6f85cb4SMark Rutland 	if (flags & PERF_EF_START)
1247c6f85cb4SMark Rutland 		cci_pmu_start(event, PERF_EF_RELOAD);
1248c6f85cb4SMark Rutland 
1249c6f85cb4SMark Rutland 	/* Propagate our changes to the userspace mapping. */
1250c6f85cb4SMark Rutland 	perf_event_update_userpage(event);
1251c6f85cb4SMark Rutland 
1252c6f85cb4SMark Rutland out:
1253c6f85cb4SMark Rutland 	perf_pmu_enable(event->pmu);
1254c6f85cb4SMark Rutland 	return err;
1255c6f85cb4SMark Rutland }
1256c6f85cb4SMark Rutland 
1257c6f85cb4SMark Rutland static void cci_pmu_del(struct perf_event *event, int flags)
1258c6f85cb4SMark Rutland {
1259c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1260c6f85cb4SMark Rutland 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
1261c6f85cb4SMark Rutland 	struct hw_perf_event *hwc = &event->hw;
1262c6f85cb4SMark Rutland 	int idx = hwc->idx;
1263c6f85cb4SMark Rutland 
1264c6f85cb4SMark Rutland 	cci_pmu_stop(event, PERF_EF_UPDATE);
1265c6f85cb4SMark Rutland 	hw_events->events[idx] = NULL;
1266c6f85cb4SMark Rutland 	clear_bit(idx, hw_events->used_mask);
1267c6f85cb4SMark Rutland 
1268c6f85cb4SMark Rutland 	perf_event_update_userpage(event);
1269c6f85cb4SMark Rutland }
1270c6f85cb4SMark Rutland 
1271c6f85cb4SMark Rutland static int
1272b1862199SSuzuki K. Poulose validate_event(struct pmu *cci_pmu,
1273b1862199SSuzuki K. Poulose                struct cci_pmu_hw_events *hw_events,
1274c6f85cb4SMark Rutland                struct perf_event *event)
1275c6f85cb4SMark Rutland {
1276c6f85cb4SMark Rutland 	if (is_software_event(event))
1277c6f85cb4SMark Rutland 		return 1;
1278c6f85cb4SMark Rutland 
1279b1862199SSuzuki K. Poulose 	/*
1280b1862199SSuzuki K. Poulose 	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
1281b1862199SSuzuki K. Poulose 	 * core perf code won't check that the pmu->ctx == leader->ctx
1282b1862199SSuzuki K. Poulose 	 * until after pmu->event_init(event).
1283b1862199SSuzuki K. Poulose 	 */
1284b1862199SSuzuki K. Poulose 	if (event->pmu != cci_pmu)
1285b1862199SSuzuki K. Poulose 		return 0;
1286b1862199SSuzuki K. Poulose 
1287c6f85cb4SMark Rutland 	if (event->state < PERF_EVENT_STATE_OFF)
1288c6f85cb4SMark Rutland 		return 1;
1289c6f85cb4SMark Rutland 
1290c6f85cb4SMark Rutland 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
1291c6f85cb4SMark Rutland 		return 1;
1292c6f85cb4SMark Rutland 
1293c6f85cb4SMark Rutland 	return pmu_get_event_idx(hw_events, event) >= 0;
1294c6f85cb4SMark Rutland }
1295c6f85cb4SMark Rutland 
1296c6f85cb4SMark Rutland static int
1297c6f85cb4SMark Rutland validate_group(struct perf_event *event)
1298c6f85cb4SMark Rutland {
1299c6f85cb4SMark Rutland 	struct perf_event *sibling, *leader = event->group_leader;
1300ab5b316dSSuzuki K. Poulose 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1301ab5b316dSSuzuki K. Poulose 	unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
1302c6f85cb4SMark Rutland 	struct cci_pmu_hw_events fake_pmu = {
1303c6f85cb4SMark Rutland 		/*
1304c6f85cb4SMark Rutland 		 * Initialise the fake PMU. We only need to populate the
1305c6f85cb4SMark Rutland 		 * used_mask for the purposes of validation.
1306c6f85cb4SMark Rutland 		 */
1307ab5b316dSSuzuki K. Poulose 		.used_mask = mask,
1308c6f85cb4SMark Rutland 	};
1309ab5b316dSSuzuki K. Poulose 	memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
1310c6f85cb4SMark Rutland 
1311b1862199SSuzuki K. Poulose 	if (!validate_event(event->pmu, &fake_pmu, leader))
1312c6f85cb4SMark Rutland 		return -EINVAL;
1313c6f85cb4SMark Rutland 
1314c6f85cb4SMark Rutland 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
1315b1862199SSuzuki K. Poulose 		if (!validate_event(event->pmu, &fake_pmu, sibling))
1316c6f85cb4SMark Rutland 			return -EINVAL;
1317c6f85cb4SMark Rutland 	}
1318c6f85cb4SMark Rutland 
1319b1862199SSuzuki K. Poulose 	if (!validate_event(event->pmu, &fake_pmu, event))
1320c6f85cb4SMark Rutland 		return -EINVAL;
1321c6f85cb4SMark Rutland 
1322b91c8f28SPunit Agrawal 	return 0;
1323b91c8f28SPunit Agrawal }
1324b91c8f28SPunit Agrawal 
1325c6f85cb4SMark Rutland static int
1326c6f85cb4SMark Rutland __hw_perf_event_init(struct perf_event *event)
1327c6f85cb4SMark Rutland {
1328c6f85cb4SMark Rutland 	struct hw_perf_event *hwc = &event->hw;
1329c6f85cb4SMark Rutland 	int mapping;
1330c6f85cb4SMark Rutland 
1331c6f85cb4SMark Rutland 	mapping = pmu_map_event(event);
1332c6f85cb4SMark Rutland 
1333c6f85cb4SMark Rutland 	if (mapping < 0) {
1334c6f85cb4SMark Rutland 		pr_debug("event %x:%llx not supported\n", event->attr.type,
1335c6f85cb4SMark Rutland 			 event->attr.config);
1336c6f85cb4SMark Rutland 		return mapping;
1337b91c8f28SPunit Agrawal 	}
1338b91c8f28SPunit Agrawal 
1339c6f85cb4SMark Rutland 	/*
1340c6f85cb4SMark Rutland 	 * We don't assign an index until we actually place the event onto
1341c6f85cb4SMark Rutland 	 * hardware. Use -1 to signify that we haven't decided where to put it
1342c6f85cb4SMark Rutland 	 * yet.
1343c6f85cb4SMark Rutland 	 */
1344c6f85cb4SMark Rutland 	hwc->idx		= -1;
1345c6f85cb4SMark Rutland 	hwc->config_base	= 0;
1346c6f85cb4SMark Rutland 	hwc->config		= 0;
1347c6f85cb4SMark Rutland 	hwc->event_base		= 0;
1348b91c8f28SPunit Agrawal 
1349c6f85cb4SMark Rutland 	/*
1350c6f85cb4SMark Rutland 	 * Store the event encoding into the config_base field.
1351c6f85cb4SMark Rutland 	 */
1352c6f85cb4SMark Rutland 	hwc->config_base	    |= (unsigned long)mapping;
1353c6f85cb4SMark Rutland 
1354c6f85cb4SMark Rutland 	/*
1355c6f85cb4SMark Rutland 	 * Limit the sample_period to half of the counter width. That way, the
1356c6f85cb4SMark Rutland 	 * new counter value is far less likely to overtake the previous one
1357c6f85cb4SMark Rutland 	 * unless you have some serious IRQ latency issues.
1358c6f85cb4SMark Rutland 	 */
1359c6f85cb4SMark Rutland 	hwc->sample_period  = CCI_PMU_CNTR_MASK >> 1;
1360c6f85cb4SMark Rutland 	hwc->last_period    = hwc->sample_period;
1361c6f85cb4SMark Rutland 	local64_set(&hwc->period_left, hwc->sample_period);
1362c6f85cb4SMark Rutland 
1363c6f85cb4SMark Rutland 	if (event->group_leader != event) {
1364c6f85cb4SMark Rutland 		if (validate_group(event) != 0)
1365c6f85cb4SMark Rutland 			return -EINVAL;
1366b91c8f28SPunit Agrawal 	}
1367b91c8f28SPunit Agrawal 
1368c6f85cb4SMark Rutland 	return 0;
1369c6f85cb4SMark Rutland }
1370c6f85cb4SMark Rutland 
1371c6f85cb4SMark Rutland static int cci_pmu_event_init(struct perf_event *event)
1372b91c8f28SPunit Agrawal {
1373c6f85cb4SMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
1374c6f85cb4SMark Rutland 	atomic_t *active_events = &cci_pmu->active_events;
1375c6f85cb4SMark Rutland 	int err = 0;
1376c6f85cb4SMark Rutland 	int cpu;
1377c6f85cb4SMark Rutland 
1378c6f85cb4SMark Rutland 	if (event->attr.type != event->pmu->type)
1379c6f85cb4SMark Rutland 		return -ENOENT;
1380c6f85cb4SMark Rutland 
1381c6f85cb4SMark Rutland 	/* Shared by all CPUs, no meaningful state to sample */
1382c6f85cb4SMark Rutland 	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1383c6f85cb4SMark Rutland 		return -EOPNOTSUPP;
1384c6f85cb4SMark Rutland 
1385c6f85cb4SMark Rutland 	/* We have no filtering of any kind */
1386c6f85cb4SMark Rutland 	if (event->attr.exclude_user	||
1387c6f85cb4SMark Rutland 	    event->attr.exclude_kernel	||
1388c6f85cb4SMark Rutland 	    event->attr.exclude_hv	||
1389c6f85cb4SMark Rutland 	    event->attr.exclude_idle	||
1390c6f85cb4SMark Rutland 	    event->attr.exclude_host	||
1391c6f85cb4SMark Rutland 	    event->attr.exclude_guest)
1392c6f85cb4SMark Rutland 		return -EINVAL;
1393c6f85cb4SMark Rutland 
1394c6f85cb4SMark Rutland 	/*
1395c6f85cb4SMark Rutland 	 * Following the example set by other "uncore" PMUs, we accept any CPU
1396c6f85cb4SMark Rutland 	 * and rewrite its affinity dynamically rather than having perf core
1397c6f85cb4SMark Rutland 	 * handle cpu == -1 and pid == -1 for this case.
1398c6f85cb4SMark Rutland 	 *
1399c6f85cb4SMark Rutland 	 * The perf core will pin online CPUs for the duration of this call and
1400c6f85cb4SMark Rutland 	 * the event being installed into its context, so the PMU's CPU can't
1401c6f85cb4SMark Rutland 	 * change under our feet.
1402c6f85cb4SMark Rutland 	 */
1403c6f85cb4SMark Rutland 	cpu = cpumask_first(&cci_pmu->cpus);
1404c6f85cb4SMark Rutland 	if (event->cpu < 0 || cpu < 0)
1405c6f85cb4SMark Rutland 		return -EINVAL;
1406c6f85cb4SMark Rutland 	event->cpu = cpu;
1407c6f85cb4SMark Rutland 
1408c6f85cb4SMark Rutland 	event->destroy = hw_perf_event_destroy;
1409c6f85cb4SMark Rutland 	if (!atomic_inc_not_zero(active_events)) {
1410c6f85cb4SMark Rutland 		mutex_lock(&cci_pmu->reserve_mutex);
1411c6f85cb4SMark Rutland 		if (atomic_read(active_events) == 0)
1412c6f85cb4SMark Rutland 			err = cci_pmu_get_hw(cci_pmu);
1413c6f85cb4SMark Rutland 		if (!err)
1414c6f85cb4SMark Rutland 			atomic_inc(active_events);
1415c6f85cb4SMark Rutland 		mutex_unlock(&cci_pmu->reserve_mutex);
1416c6f85cb4SMark Rutland 	}
1417c6f85cb4SMark Rutland 	if (err)
1418c6f85cb4SMark Rutland 		return err;
1419c6f85cb4SMark Rutland 
1420c6f85cb4SMark Rutland 	err = __hw_perf_event_init(event);
1421c6f85cb4SMark Rutland 	if (err)
1422c6f85cb4SMark Rutland 		hw_perf_event_destroy(event);
1423c6f85cb4SMark Rutland 
1424c6f85cb4SMark Rutland 	return err;
1425c6f85cb4SMark Rutland }
1426c6f85cb4SMark Rutland 
1427a1a076d7SSuzuki K. Poulose static ssize_t pmu_cpumask_attr_show(struct device *dev,
1428c6f85cb4SMark Rutland 				     struct device_attribute *attr, char *buf)
1429c6f85cb4SMark Rutland {
14305e442ebaSMark Rutland 	struct pmu *pmu = dev_get_drvdata(dev);
14315e442ebaSMark Rutland 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
1432a1a076d7SSuzuki K. Poulose 
1433660e5ec0STejun Heo 	int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
1434a1a076d7SSuzuki K. Poulose 			  cpumask_pr_args(&cci_pmu->cpus));
1435c6f85cb4SMark Rutland 	buf[n++] = '\n';
1436c6f85cb4SMark Rutland 	buf[n] = '\0';
1437c6f85cb4SMark Rutland 	return n;
1438c6f85cb4SMark Rutland }
1439c6f85cb4SMark Rutland 
14405e442ebaSMark Rutland static struct device_attribute pmu_cpumask_attr =
14415e442ebaSMark Rutland 	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
1442c6f85cb4SMark Rutland 
1443c6f85cb4SMark Rutland static struct attribute *pmu_attrs[] = {
14445e442ebaSMark Rutland 	&pmu_cpumask_attr.attr,
1445c6f85cb4SMark Rutland 	NULL,
1446c6f85cb4SMark Rutland };
1447c6f85cb4SMark Rutland 
1448c6f85cb4SMark Rutland static struct attribute_group pmu_attr_group = {
1449c6f85cb4SMark Rutland 	.attrs = pmu_attrs,
1450c6f85cb4SMark Rutland };
1451c6f85cb4SMark Rutland 
1452e14cfad3SSuzuki K. Poulose static struct attribute_group pmu_format_attr_group = {
1453e14cfad3SSuzuki K. Poulose 	.name = "format",
1454e14cfad3SSuzuki K. Poulose 	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
1455e14cfad3SSuzuki K. Poulose };
1456e14cfad3SSuzuki K. Poulose 
1457e14cfad3SSuzuki K. Poulose static struct attribute_group pmu_event_attr_group = {
1458e14cfad3SSuzuki K. Poulose 	.name = "events",
1459e14cfad3SSuzuki K. Poulose 	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
1460e14cfad3SSuzuki K. Poulose };
1461e14cfad3SSuzuki K. Poulose 
1462c6f85cb4SMark Rutland static const struct attribute_group *pmu_attr_groups[] = {
1463c6f85cb4SMark Rutland 	&pmu_attr_group,
1464e14cfad3SSuzuki K. Poulose 	&pmu_format_attr_group,
1465e14cfad3SSuzuki K. Poulose 	&pmu_event_attr_group,
1466c6f85cb4SMark Rutland 	NULL
1467c6f85cb4SMark Rutland };
1468c6f85cb4SMark Rutland 
1469c6f85cb4SMark Rutland static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
1470c6f85cb4SMark Rutland {
14715e442ebaSMark Rutland 	const struct cci_pmu_model *model = cci_pmu->model;
14725e442ebaSMark Rutland 	char *name = model->name;
1473ab5b316dSSuzuki K. Poulose 	u32 num_cntrs;
1474a1a076d7SSuzuki K. Poulose 
14755e442ebaSMark Rutland 	pmu_event_attr_group.attrs = model->event_attrs;
14765e442ebaSMark Rutland 	pmu_format_attr_group.attrs = model->format_attrs;
1477e14cfad3SSuzuki K. Poulose 
1478c6f85cb4SMark Rutland 	cci_pmu->pmu = (struct pmu) {
1479fc17c839SSuzuki K. Poulose 		.name		= cci_pmu->model->name,
1480c6f85cb4SMark Rutland 		.task_ctx_nr	= perf_invalid_context,
1481c6f85cb4SMark Rutland 		.pmu_enable	= cci_pmu_enable,
1482c6f85cb4SMark Rutland 		.pmu_disable	= cci_pmu_disable,
1483c6f85cb4SMark Rutland 		.event_init	= cci_pmu_event_init,
1484c6f85cb4SMark Rutland 		.add		= cci_pmu_add,
1485c6f85cb4SMark Rutland 		.del		= cci_pmu_del,
1486c6f85cb4SMark Rutland 		.start		= cci_pmu_start,
1487c6f85cb4SMark Rutland 		.stop		= cci_pmu_stop,
1488c6f85cb4SMark Rutland 		.read		= pmu_read,
1489c6f85cb4SMark Rutland 		.attr_groups	= pmu_attr_groups,
1490b91c8f28SPunit Agrawal 	};
1491b91c8f28SPunit Agrawal 
1492b91c8f28SPunit Agrawal 	cci_pmu->plat_device = pdev;
1493ab5b316dSSuzuki K. Poulose 	num_cntrs = pmu_get_max_counters();
1494ab5b316dSSuzuki K. Poulose 	if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
1495ab5b316dSSuzuki K. Poulose 		dev_warn(&pdev->dev,
1496ab5b316dSSuzuki K. Poulose 			"PMU implements more counters(%d) than supported by"
1497ab5b316dSSuzuki K. Poulose 			" the model(%d), truncated.",
1498ab5b316dSSuzuki K. Poulose 			num_cntrs, cci_pmu->model->num_hw_cntrs);
1499ab5b316dSSuzuki K. Poulose 		num_cntrs = cci_pmu->model->num_hw_cntrs;
1500ab5b316dSSuzuki K. Poulose 	}
1501ab5b316dSSuzuki K. Poulose 	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
1502b91c8f28SPunit Agrawal 
1503c6f85cb4SMark Rutland 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
1504b91c8f28SPunit Agrawal }
1505b91c8f28SPunit Agrawal 
1506b230f0dbSSebastian Andrzej Siewior static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
1507c6f85cb4SMark Rutland {
1508b230f0dbSSebastian Andrzej Siewior 	struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
1509c6f85cb4SMark Rutland 	unsigned int target;
1510c6f85cb4SMark Rutland 
1511a1a076d7SSuzuki K. Poulose 	if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
1512b230f0dbSSebastian Andrzej Siewior 		return 0;
1513c6f85cb4SMark Rutland 	target = cpumask_any_but(cpu_online_mask, cpu);
151428c94843SSebastian Andrzej Siewior 	if (target >= nr_cpu_ids)
1515b230f0dbSSebastian Andrzej Siewior 		return 0;
1516c6f85cb4SMark Rutland 	/*
1517c6f85cb4SMark Rutland 	 * TODO: migrate context once core races on event->ctx have
1518c6f85cb4SMark Rutland 	 * been fixed.
1519c6f85cb4SMark Rutland 	 */
1520a1a076d7SSuzuki K. Poulose 	cpumask_set_cpu(target, &cci_pmu->cpus);
152128c94843SSebastian Andrzej Siewior 	return 0;
1522c6f85cb4SMark Rutland }
1523c6f85cb4SMark Rutland 
1524fc17c839SSuzuki K. Poulose static struct cci_pmu_model cci_pmu_models[] = {
1525f4d58938SSuzuki K. Poulose #ifdef CONFIG_ARM_CCI400_PMU
1526f4d58938SSuzuki K. Poulose 	[CCI400_R0] = {
1527fc17c839SSuzuki K. Poulose 		.name = "CCI_400",
1528ab5b316dSSuzuki K. Poulose 		.fixed_hw_cntrs = 1,	/* Cycle counter */
1529ab5b316dSSuzuki K. Poulose 		.num_hw_cntrs = 4,
1530ab5b316dSSuzuki K. Poulose 		.cntr_size = SZ_4K,
1531e14cfad3SSuzuki K. Poulose 		.format_attrs = cci400_pmu_format_attrs,
1532e14cfad3SSuzuki K. Poulose 		.event_attrs = cci400_r0_pmu_event_attrs,
1533fc17c839SSuzuki K. Poulose 		.event_ranges = {
1534fc17c839SSuzuki K. Poulose 			[CCI_IF_SLAVE] = {
1535f4d58938SSuzuki K. Poulose 				CCI400_R0_SLAVE_PORT_MIN_EV,
1536f4d58938SSuzuki K. Poulose 				CCI400_R0_SLAVE_PORT_MAX_EV,
1537fc17c839SSuzuki K. Poulose 			},
1538fc17c839SSuzuki K. Poulose 			[CCI_IF_MASTER] = {
1539f4d58938SSuzuki K. Poulose 				CCI400_R0_MASTER_PORT_MIN_EV,
1540f4d58938SSuzuki K. Poulose 				CCI400_R0_MASTER_PORT_MAX_EV,
1541fc17c839SSuzuki K. Poulose 			},
1542fc17c839SSuzuki K. Poulose 		},
154331216290SSuzuki K. Poulose 		.validate_hw_event = cci400_validate_hw_event,
154431216290SSuzuki K. Poulose 		.get_event_idx = cci400_get_event_idx,
1545fc17c839SSuzuki K. Poulose 	},
1546f4d58938SSuzuki K. Poulose 	[CCI400_R1] = {
1547fc17c839SSuzuki K. Poulose 		.name = "CCI_400_r1",
1548ab5b316dSSuzuki K. Poulose 		.fixed_hw_cntrs = 1,	/* Cycle counter */
1549ab5b316dSSuzuki K. Poulose 		.num_hw_cntrs = 4,
1550ab5b316dSSuzuki K. Poulose 		.cntr_size = SZ_4K,
1551e14cfad3SSuzuki K. Poulose 		.format_attrs = cci400_pmu_format_attrs,
1552e14cfad3SSuzuki K. Poulose 		.event_attrs = cci400_r1_pmu_event_attrs,
1553fc17c839SSuzuki K. Poulose 		.event_ranges = {
1554fc17c839SSuzuki K. Poulose 			[CCI_IF_SLAVE] = {
1555f4d58938SSuzuki K. Poulose 				CCI400_R1_SLAVE_PORT_MIN_EV,
1556f4d58938SSuzuki K. Poulose 				CCI400_R1_SLAVE_PORT_MAX_EV,
1557fc17c839SSuzuki K. Poulose 			},
1558fc17c839SSuzuki K. Poulose 			[CCI_IF_MASTER] = {
1559f4d58938SSuzuki K. Poulose 				CCI400_R1_MASTER_PORT_MIN_EV,
1560f4d58938SSuzuki K. Poulose 				CCI400_R1_MASTER_PORT_MAX_EV,
1561fc17c839SSuzuki K. Poulose 			},
1562fc17c839SSuzuki K. Poulose 		},
156331216290SSuzuki K. Poulose 		.validate_hw_event = cci400_validate_hw_event,
156431216290SSuzuki K. Poulose 		.get_event_idx = cci400_get_event_idx,
1565fc17c839SSuzuki K. Poulose 	},
1566f4d58938SSuzuki K. Poulose #endif
15673d2e8701SSuzuki K Poulose #ifdef CONFIG_ARM_CCI5xx_PMU
1568a95791efSSuzuki K. Poulose 	[CCI500_R0] = {
1569a95791efSSuzuki K. Poulose 		.name = "CCI_500",
1570a95791efSSuzuki K. Poulose 		.fixed_hw_cntrs = 0,
1571a95791efSSuzuki K. Poulose 		.num_hw_cntrs = 8,
1572a95791efSSuzuki K. Poulose 		.cntr_size = SZ_64K,
15733d2e8701SSuzuki K Poulose 		.format_attrs = cci5xx_pmu_format_attrs,
15743d2e8701SSuzuki K Poulose 		.event_attrs = cci5xx_pmu_event_attrs,
1575a95791efSSuzuki K. Poulose 		.event_ranges = {
1576a95791efSSuzuki K. Poulose 			[CCI_IF_SLAVE] = {
15773d2e8701SSuzuki K Poulose 				CCI5xx_SLAVE_PORT_MIN_EV,
15783d2e8701SSuzuki K Poulose 				CCI5xx_SLAVE_PORT_MAX_EV,
1579a95791efSSuzuki K. Poulose 			},
1580a95791efSSuzuki K. Poulose 			[CCI_IF_MASTER] = {
15813d2e8701SSuzuki K Poulose 				CCI5xx_MASTER_PORT_MIN_EV,
15823d2e8701SSuzuki K Poulose 				CCI5xx_MASTER_PORT_MAX_EV,
1583a95791efSSuzuki K. Poulose 			},
1584a95791efSSuzuki K. Poulose 			[CCI_IF_GLOBAL] = {
15853d2e8701SSuzuki K Poulose 				CCI5xx_GLOBAL_PORT_MIN_EV,
15863d2e8701SSuzuki K Poulose 				CCI5xx_GLOBAL_PORT_MAX_EV,
1587a95791efSSuzuki K. Poulose 			},
1588a95791efSSuzuki K. Poulose 		},
1589a95791efSSuzuki K. Poulose 		.validate_hw_event = cci500_validate_hw_event,
15903d2e8701SSuzuki K Poulose 		.write_counters	= cci5xx_pmu_write_counters,
1591a95791efSSuzuki K. Poulose 	},
1592d7dd5fd7SSuzuki K Poulose 	[CCI550_R0] = {
1593d7dd5fd7SSuzuki K Poulose 		.name = "CCI_550",
1594d7dd5fd7SSuzuki K Poulose 		.fixed_hw_cntrs = 0,
1595d7dd5fd7SSuzuki K Poulose 		.num_hw_cntrs = 8,
1596d7dd5fd7SSuzuki K Poulose 		.cntr_size = SZ_64K,
1597d7dd5fd7SSuzuki K Poulose 		.format_attrs = cci5xx_pmu_format_attrs,
1598d7dd5fd7SSuzuki K Poulose 		.event_attrs = cci5xx_pmu_event_attrs,
1599d7dd5fd7SSuzuki K Poulose 		.event_ranges = {
1600d7dd5fd7SSuzuki K Poulose 			[CCI_IF_SLAVE] = {
1601d7dd5fd7SSuzuki K Poulose 				CCI5xx_SLAVE_PORT_MIN_EV,
1602d7dd5fd7SSuzuki K Poulose 				CCI5xx_SLAVE_PORT_MAX_EV,
1603d7dd5fd7SSuzuki K Poulose 			},
1604d7dd5fd7SSuzuki K Poulose 			[CCI_IF_MASTER] = {
1605d7dd5fd7SSuzuki K Poulose 				CCI5xx_MASTER_PORT_MIN_EV,
1606d7dd5fd7SSuzuki K Poulose 				CCI5xx_MASTER_PORT_MAX_EV,
1607d7dd5fd7SSuzuki K Poulose 			},
1608d7dd5fd7SSuzuki K Poulose 			[CCI_IF_GLOBAL] = {
1609d7dd5fd7SSuzuki K Poulose 				CCI5xx_GLOBAL_PORT_MIN_EV,
1610d7dd5fd7SSuzuki K Poulose 				CCI5xx_GLOBAL_PORT_MAX_EV,
1611d7dd5fd7SSuzuki K Poulose 			},
1612d7dd5fd7SSuzuki K Poulose 		},
1613d7dd5fd7SSuzuki K Poulose 		.validate_hw_event = cci550_validate_hw_event,
1614d7dd5fd7SSuzuki K Poulose 		.write_counters	= cci5xx_pmu_write_counters,
1615d7dd5fd7SSuzuki K Poulose 	},
1616a95791efSSuzuki K. Poulose #endif
1617fc17c839SSuzuki K. Poulose };
1618fc17c839SSuzuki K. Poulose 
1619b91c8f28SPunit Agrawal static const struct of_device_id arm_cci_pmu_matches[] = {
1620f4d58938SSuzuki K. Poulose #ifdef CONFIG_ARM_CCI400_PMU
1621b91c8f28SPunit Agrawal 	{
1622b91c8f28SPunit Agrawal 		.compatible = "arm,cci-400-pmu",
1623772742a6SSuzuki K. Poulose 		.data	= NULL,
1624772742a6SSuzuki K. Poulose 	},
1625772742a6SSuzuki K. Poulose 	{
1626772742a6SSuzuki K. Poulose 		.compatible = "arm,cci-400-pmu,r0",
1627f4d58938SSuzuki K. Poulose 		.data	= &cci_pmu_models[CCI400_R0],
1628772742a6SSuzuki K. Poulose 	},
1629772742a6SSuzuki K. Poulose 	{
1630772742a6SSuzuki K. Poulose 		.compatible = "arm,cci-400-pmu,r1",
1631f4d58938SSuzuki K. Poulose 		.data	= &cci_pmu_models[CCI400_R1],
1632b91c8f28SPunit Agrawal 	},
1633f4d58938SSuzuki K. Poulose #endif
16343d2e8701SSuzuki K Poulose #ifdef CONFIG_ARM_CCI5xx_PMU
1635a95791efSSuzuki K. Poulose 	{
1636a95791efSSuzuki K. Poulose 		.compatible = "arm,cci-500-pmu,r0",
1637a95791efSSuzuki K. Poulose 		.data = &cci_pmu_models[CCI500_R0],
1638a95791efSSuzuki K. Poulose 	},
1639d7dd5fd7SSuzuki K Poulose 	{
1640d7dd5fd7SSuzuki K Poulose 		.compatible = "arm,cci-550-pmu,r0",
1641d7dd5fd7SSuzuki K Poulose 		.data = &cci_pmu_models[CCI550_R0],
1642d7dd5fd7SSuzuki K Poulose 	},
1643a95791efSSuzuki K. Poulose #endif
1644b91c8f28SPunit Agrawal 	{},
1645b91c8f28SPunit Agrawal };
1646b91c8f28SPunit Agrawal 
1647fc17c839SSuzuki K. Poulose static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
1648fc17c839SSuzuki K. Poulose {
1649fc17c839SSuzuki K. Poulose 	const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
1650fc17c839SSuzuki K. Poulose 							pdev->dev.of_node);
1651fc17c839SSuzuki K. Poulose 	if (!match)
1652fc17c839SSuzuki K. Poulose 		return NULL;
1653772742a6SSuzuki K. Poulose 	if (match->data)
1654772742a6SSuzuki K. Poulose 		return match->data;
1655fc17c839SSuzuki K. Poulose 
1656772742a6SSuzuki K. Poulose 	dev_warn(&pdev->dev, "DEPRECATED compatible property,"
1657772742a6SSuzuki K. Poulose 			 "requires secure access to CCI registers");
1658fc17c839SSuzuki K. Poulose 	return probe_cci_model(pdev);
1659fc17c839SSuzuki K. Poulose }
1660fc17c839SSuzuki K. Poulose 
1661f6b9e83cSSuzuki K. Poulose static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
1662f6b9e83cSSuzuki K. Poulose {
1663f6b9e83cSSuzuki K. Poulose 	int i;
1664f6b9e83cSSuzuki K. Poulose 
1665f6b9e83cSSuzuki K. Poulose 	for (i = 0; i < nr_irqs; i++)
1666f6b9e83cSSuzuki K. Poulose 		if (irq == irqs[i])
1667f6b9e83cSSuzuki K. Poulose 			return true;
1668f6b9e83cSSuzuki K. Poulose 
1669f6b9e83cSSuzuki K. Poulose 	return false;
1670f6b9e83cSSuzuki K. Poulose }
1671f6b9e83cSSuzuki K. Poulose 
1672ab5b316dSSuzuki K. Poulose static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev)
1673ab5b316dSSuzuki K. Poulose {
1674ab5b316dSSuzuki K. Poulose 	struct cci_pmu *cci_pmu;
1675ab5b316dSSuzuki K. Poulose 	const struct cci_pmu_model *model;
1676ab5b316dSSuzuki K. Poulose 
1677ab5b316dSSuzuki K. Poulose 	/*
1678ab5b316dSSuzuki K. Poulose 	 * All allocations are devm_* hence we don't have to free
1679ab5b316dSSuzuki K. Poulose 	 * them explicitly on an error, as it would end up in driver
1680ab5b316dSSuzuki K. Poulose 	 * detach.
1681ab5b316dSSuzuki K. Poulose 	 */
1682ab5b316dSSuzuki K. Poulose 	model = get_cci_model(pdev);
1683ab5b316dSSuzuki K. Poulose 	if (!model) {
1684ab5b316dSSuzuki K. Poulose 		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
1685ab5b316dSSuzuki K. Poulose 		return ERR_PTR(-ENODEV);
1686ab5b316dSSuzuki K. Poulose 	}
1687ab5b316dSSuzuki K. Poulose 
1688ab5b316dSSuzuki K. Poulose 	cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
1689ab5b316dSSuzuki K. Poulose 	if (!cci_pmu)
1690ab5b316dSSuzuki K. Poulose 		return ERR_PTR(-ENOMEM);
1691ab5b316dSSuzuki K. Poulose 
1692ab5b316dSSuzuki K. Poulose 	cci_pmu->model = model;
1693ab5b316dSSuzuki K. Poulose 	cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
1694ab5b316dSSuzuki K. Poulose 					sizeof(*cci_pmu->irqs), GFP_KERNEL);
1695ab5b316dSSuzuki K. Poulose 	if (!cci_pmu->irqs)
1696ab5b316dSSuzuki K. Poulose 		return ERR_PTR(-ENOMEM);
1697ab5b316dSSuzuki K. Poulose 	cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
1698ab5b316dSSuzuki K. Poulose 					     CCI_PMU_MAX_HW_CNTRS(model),
1699ab5b316dSSuzuki K. Poulose 					     sizeof(*cci_pmu->hw_events.events),
1700ab5b316dSSuzuki K. Poulose 					     GFP_KERNEL);
1701ab5b316dSSuzuki K. Poulose 	if (!cci_pmu->hw_events.events)
1702ab5b316dSSuzuki K. Poulose 		return ERR_PTR(-ENOMEM);
1703ab5b316dSSuzuki K. Poulose 	cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
1704ab5b316dSSuzuki K. Poulose 						BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
1705ab5b316dSSuzuki K. Poulose 						sizeof(*cci_pmu->hw_events.used_mask),
1706ab5b316dSSuzuki K. Poulose 						GFP_KERNEL);
1707ab5b316dSSuzuki K. Poulose 	if (!cci_pmu->hw_events.used_mask)
1708ab5b316dSSuzuki K. Poulose 		return ERR_PTR(-ENOMEM);
1709ab5b316dSSuzuki K. Poulose 
1710ab5b316dSSuzuki K. Poulose 	return cci_pmu;
1711ab5b316dSSuzuki K. Poulose }
1712ab5b316dSSuzuki K. Poulose 
1713ab5b316dSSuzuki K. Poulose 
1714b91c8f28SPunit Agrawal static int cci_pmu_probe(struct platform_device *pdev)
1715b91c8f28SPunit Agrawal {
1716b91c8f28SPunit Agrawal 	struct resource *res;
1717a1a076d7SSuzuki K. Poulose 	struct cci_pmu *cci_pmu;
1718b91c8f28SPunit Agrawal 	int i, ret, irq;
1719fc17c839SSuzuki K. Poulose 
1720ab5b316dSSuzuki K. Poulose 	cci_pmu = cci_pmu_alloc(pdev);
1721ab5b316dSSuzuki K. Poulose 	if (IS_ERR(cci_pmu))
1722ab5b316dSSuzuki K. Poulose 		return PTR_ERR(cci_pmu);
1723b91c8f28SPunit Agrawal 
1724b91c8f28SPunit Agrawal 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1725a1a076d7SSuzuki K. Poulose 	cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
1726a1a076d7SSuzuki K. Poulose 	if (IS_ERR(cci_pmu->base))
1727fee4f2c6SWei Yongjun 		return -ENOMEM;
1728b91c8f28SPunit Agrawal 
1729b91c8f28SPunit Agrawal 	/*
1730ab5b316dSSuzuki K. Poulose 	 * CCI PMU has one overflow interrupt per counter; but some may be tied
1731b91c8f28SPunit Agrawal 	 * together to a common interrupt.
1732b91c8f28SPunit Agrawal 	 */
1733a1a076d7SSuzuki K. Poulose 	cci_pmu->nr_irqs = 0;
1734ab5b316dSSuzuki K. Poulose 	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
1735b91c8f28SPunit Agrawal 		irq = platform_get_irq(pdev, i);
1736b91c8f28SPunit Agrawal 		if (irq < 0)
1737b91c8f28SPunit Agrawal 			break;
1738b91c8f28SPunit Agrawal 
1739a1a076d7SSuzuki K. Poulose 		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
1740b91c8f28SPunit Agrawal 			continue;
1741b91c8f28SPunit Agrawal 
1742a1a076d7SSuzuki K. Poulose 		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
1743b91c8f28SPunit Agrawal 	}
1744b91c8f28SPunit Agrawal 
1745b91c8f28SPunit Agrawal 	/*
1746b91c8f28SPunit Agrawal 	 * Ensure that the device tree has as many interrupts as the number
1747b91c8f28SPunit Agrawal 	 * of counters.
1748b91c8f28SPunit Agrawal 	 */
1749ab5b316dSSuzuki K. Poulose 	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
1750b91c8f28SPunit Agrawal 		dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
1751ab5b316dSSuzuki K. Poulose 			i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
1752fee4f2c6SWei Yongjun 		return -EINVAL;
1753b91c8f28SPunit Agrawal 	}
1754b91c8f28SPunit Agrawal 
1755a1a076d7SSuzuki K. Poulose 	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1756a1a076d7SSuzuki K. Poulose 	mutex_init(&cci_pmu->reserve_mutex);
1757a1a076d7SSuzuki K. Poulose 	atomic_set(&cci_pmu->active_events, 0);
1758a1a076d7SSuzuki K. Poulose 	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
1759b91c8f28SPunit Agrawal 
176028c94843SSebastian Andrzej Siewior 	ret = cci_pmu_init(cci_pmu, pdev);
1761c6f85cb4SMark Rutland 	if (ret)
1762c6f85cb4SMark Rutland 		return ret;
1763c6f85cb4SMark Rutland 
1764b230f0dbSSebastian Andrzej Siewior 	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
1765b230f0dbSSebastian Andrzej Siewior 					 &cci_pmu->node);
1766a1a076d7SSuzuki K. Poulose 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
1767b91c8f28SPunit Agrawal 	return 0;
1768b91c8f28SPunit Agrawal }
1769b91c8f28SPunit Agrawal 
1770b91c8f28SPunit Agrawal static int cci_platform_probe(struct platform_device *pdev)
1771b91c8f28SPunit Agrawal {
1772b91c8f28SPunit Agrawal 	if (!cci_probed())
1773b91c8f28SPunit Agrawal 		return -ENODEV;
1774b91c8f28SPunit Agrawal 
1775b91c8f28SPunit Agrawal 	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
1776b91c8f28SPunit Agrawal }
1777b91c8f28SPunit Agrawal 
1778f6b9e83cSSuzuki K. Poulose static struct platform_driver cci_pmu_driver = {
1779f6b9e83cSSuzuki K. Poulose 	.driver = {
1780f6b9e83cSSuzuki K. Poulose 		   .name = DRIVER_NAME_PMU,
1781f6b9e83cSSuzuki K. Poulose 		   .of_match_table = arm_cci_pmu_matches,
1782f6b9e83cSSuzuki K. Poulose 		  },
1783f6b9e83cSSuzuki K. Poulose 	.probe = cci_pmu_probe,
1784f6b9e83cSSuzuki K. Poulose };
1785f6b9e83cSSuzuki K. Poulose 
1786f6b9e83cSSuzuki K. Poulose static struct platform_driver cci_platform_driver = {
1787f6b9e83cSSuzuki K. Poulose 	.driver = {
1788f6b9e83cSSuzuki K. Poulose 		   .name = DRIVER_NAME,
1789f6b9e83cSSuzuki K. Poulose 		   .of_match_table = arm_cci_matches,
1790f6b9e83cSSuzuki K. Poulose 		  },
1791f6b9e83cSSuzuki K. Poulose 	.probe = cci_platform_probe,
1792f6b9e83cSSuzuki K. Poulose };
1793f6b9e83cSSuzuki K. Poulose 
1794f6b9e83cSSuzuki K. Poulose static int __init cci_platform_init(void)
1795f6b9e83cSSuzuki K. Poulose {
1796f6b9e83cSSuzuki K. Poulose 	int ret;
1797f6b9e83cSSuzuki K. Poulose 
1798b230f0dbSSebastian Andrzej Siewior 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
179928c94843SSebastian Andrzej Siewior 				      "AP_PERF_ARM_CCI_ONLINE", NULL,
180028c94843SSebastian Andrzej Siewior 				      cci_pmu_offline_cpu);
180128c94843SSebastian Andrzej Siewior 	if (ret)
180228c94843SSebastian Andrzej Siewior 		return ret;
180328c94843SSebastian Andrzej Siewior 
1804f6b9e83cSSuzuki K. Poulose 	ret = platform_driver_register(&cci_pmu_driver);
1805f6b9e83cSSuzuki K. Poulose 	if (ret)
1806f6b9e83cSSuzuki K. Poulose 		return ret;
1807f6b9e83cSSuzuki K. Poulose 
1808f6b9e83cSSuzuki K. Poulose 	return platform_driver_register(&cci_platform_driver);
1809f6b9e83cSSuzuki K. Poulose }
1810f6b9e83cSSuzuki K. Poulose 
1811f4d58938SSuzuki K. Poulose #else /* !CONFIG_ARM_CCI_PMU */
1812f6b9e83cSSuzuki K. Poulose 
1813f6b9e83cSSuzuki K. Poulose static int __init cci_platform_init(void)
1814f6b9e83cSSuzuki K. Poulose {
1815f6b9e83cSSuzuki K. Poulose 	return 0;
1816f6b9e83cSSuzuki K. Poulose }
1817f6b9e83cSSuzuki K. Poulose 
1818f4d58938SSuzuki K. Poulose #endif /* CONFIG_ARM_CCI_PMU */
1819ee8e5d5fSSuzuki K. Poulose 
1820ee8e5d5fSSuzuki K. Poulose #ifdef CONFIG_ARM_CCI400_PORT_CTRL
1821b91c8f28SPunit Agrawal 
1822f6b9e83cSSuzuki K. Poulose #define CCI_PORT_CTRL		0x0
1823f6b9e83cSSuzuki K. Poulose #define CCI_CTRL_STATUS		0xc
1824f6b9e83cSSuzuki K. Poulose 
1825f6b9e83cSSuzuki K. Poulose #define CCI_ENABLE_SNOOP_REQ	0x1
1826f6b9e83cSSuzuki K. Poulose #define CCI_ENABLE_DVM_REQ	0x2
1827f6b9e83cSSuzuki K. Poulose #define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
1828f6b9e83cSSuzuki K. Poulose 
1829f6b9e83cSSuzuki K. Poulose enum cci_ace_port_type {
1830f6b9e83cSSuzuki K. Poulose 	ACE_INVALID_PORT = 0x0,
1831f6b9e83cSSuzuki K. Poulose 	ACE_PORT,
1832f6b9e83cSSuzuki K. Poulose 	ACE_LITE_PORT,
1833f6b9e83cSSuzuki K. Poulose };
1834f6b9e83cSSuzuki K. Poulose 
1835f6b9e83cSSuzuki K. Poulose struct cci_ace_port {
1836f6b9e83cSSuzuki K. Poulose 	void __iomem *base;
1837f6b9e83cSSuzuki K. Poulose 	unsigned long phys;
1838f6b9e83cSSuzuki K. Poulose 	enum cci_ace_port_type type;
1839f6b9e83cSSuzuki K. Poulose 	struct device_node *dn;
1840f6b9e83cSSuzuki K. Poulose };
1841f6b9e83cSSuzuki K. Poulose 
1842f6b9e83cSSuzuki K. Poulose static struct cci_ace_port *ports;
1843f6b9e83cSSuzuki K. Poulose static unsigned int nb_cci_ports;
1844f6b9e83cSSuzuki K. Poulose 
1845ed69bdd8SLorenzo Pieralisi struct cpu_port {
1846ed69bdd8SLorenzo Pieralisi 	u64 mpidr;
1847ed69bdd8SLorenzo Pieralisi 	u32 port;
1848ed69bdd8SLorenzo Pieralisi };
184962158f81SNicolas Pitre 
1850ed69bdd8SLorenzo Pieralisi /*
1851ed69bdd8SLorenzo Pieralisi  * Use the port MSB as valid flag, shift can be made dynamic
1852ed69bdd8SLorenzo Pieralisi  * by computing number of bits required for port indexes.
1853ed69bdd8SLorenzo Pieralisi  * Code disabling CCI cpu ports runs with D-cache invalidated
1854ed69bdd8SLorenzo Pieralisi  * and SCTLR bit clear so data accesses must be kept to a minimum
1855ed69bdd8SLorenzo Pieralisi  * to improve performance; for now shift is left static to
1856ed69bdd8SLorenzo Pieralisi  * avoid one more data access while disabling the CCI port.
1857ed69bdd8SLorenzo Pieralisi  */
1858ed69bdd8SLorenzo Pieralisi #define PORT_VALID_SHIFT	31
1859ed69bdd8SLorenzo Pieralisi #define PORT_VALID		(0x1 << PORT_VALID_SHIFT)
1860ed69bdd8SLorenzo Pieralisi 
1861ed69bdd8SLorenzo Pieralisi static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
1862ed69bdd8SLorenzo Pieralisi {
1863ed69bdd8SLorenzo Pieralisi 	port->port = PORT_VALID | index;
1864ed69bdd8SLorenzo Pieralisi 	port->mpidr = mpidr;
1865ed69bdd8SLorenzo Pieralisi }
1866ed69bdd8SLorenzo Pieralisi 
1867ed69bdd8SLorenzo Pieralisi static inline bool cpu_port_is_valid(struct cpu_port *port)
1868ed69bdd8SLorenzo Pieralisi {
1869ed69bdd8SLorenzo Pieralisi 	return !!(port->port & PORT_VALID);
1870ed69bdd8SLorenzo Pieralisi }
1871ed69bdd8SLorenzo Pieralisi 
1872ed69bdd8SLorenzo Pieralisi static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
1873ed69bdd8SLorenzo Pieralisi {
1874ed69bdd8SLorenzo Pieralisi 	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
1875ed69bdd8SLorenzo Pieralisi }
1876ed69bdd8SLorenzo Pieralisi 
1877ed69bdd8SLorenzo Pieralisi static struct cpu_port cpu_port[NR_CPUS];
1878ed69bdd8SLorenzo Pieralisi 
1879ed69bdd8SLorenzo Pieralisi /**
1880ed69bdd8SLorenzo Pieralisi  * __cci_ace_get_port - Function to retrieve the port index connected to
1881ed69bdd8SLorenzo Pieralisi  *			a cpu or device.
1882ed69bdd8SLorenzo Pieralisi  *
1883ed69bdd8SLorenzo Pieralisi  * @dn: device node of the device to look-up
1884ed69bdd8SLorenzo Pieralisi  * @type: port type
1885ed69bdd8SLorenzo Pieralisi  *
1886ed69bdd8SLorenzo Pieralisi  * Return value:
1887ed69bdd8SLorenzo Pieralisi  *	- CCI port index if success
1888ed69bdd8SLorenzo Pieralisi  *	- -ENODEV if failure
1889ed69bdd8SLorenzo Pieralisi  */
1890ed69bdd8SLorenzo Pieralisi static int __cci_ace_get_port(struct device_node *dn, int type)
1891ed69bdd8SLorenzo Pieralisi {
1892ed69bdd8SLorenzo Pieralisi 	int i;
1893ed69bdd8SLorenzo Pieralisi 	bool ace_match;
1894ed69bdd8SLorenzo Pieralisi 	struct device_node *cci_portn;
1895ed69bdd8SLorenzo Pieralisi 
1896ed69bdd8SLorenzo Pieralisi 	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
1897ed69bdd8SLorenzo Pieralisi 	for (i = 0; i < nb_cci_ports; i++) {
1898ed69bdd8SLorenzo Pieralisi 		ace_match = ports[i].type == type;
1899ed69bdd8SLorenzo Pieralisi 		if (ace_match && cci_portn == ports[i].dn)
1900ed69bdd8SLorenzo Pieralisi 			return i;
1901ed69bdd8SLorenzo Pieralisi 	}
1902ed69bdd8SLorenzo Pieralisi 	return -ENODEV;
1903ed69bdd8SLorenzo Pieralisi }
1904ed69bdd8SLorenzo Pieralisi 
1905ed69bdd8SLorenzo Pieralisi int cci_ace_get_port(struct device_node *dn)
1906ed69bdd8SLorenzo Pieralisi {
1907ed69bdd8SLorenzo Pieralisi 	return __cci_ace_get_port(dn, ACE_LITE_PORT);
1908ed69bdd8SLorenzo Pieralisi }
1909ed69bdd8SLorenzo Pieralisi EXPORT_SYMBOL_GPL(cci_ace_get_port);
1910ed69bdd8SLorenzo Pieralisi 
1911b91c8f28SPunit Agrawal static void cci_ace_init_ports(void)
1912ed69bdd8SLorenzo Pieralisi {
191378b4d6e0SSudeep KarkadaNagesha 	int port, cpu;
191478b4d6e0SSudeep KarkadaNagesha 	struct device_node *cpun;
1915ed69bdd8SLorenzo Pieralisi 
1916ed69bdd8SLorenzo Pieralisi 	/*
1917ed69bdd8SLorenzo Pieralisi 	 * Port index look-up speeds up the function disabling ports by CPU,
1918ed69bdd8SLorenzo Pieralisi 	 * since the logical to port index mapping is done once and does
1919ed69bdd8SLorenzo Pieralisi 	 * not change after system boot.
1920ed69bdd8SLorenzo Pieralisi 	 * The stashed index array is initialized for all possible CPUs
1921ed69bdd8SLorenzo Pieralisi 	 * at probe time.
1922ed69bdd8SLorenzo Pieralisi 	 */
192378b4d6e0SSudeep KarkadaNagesha 	for_each_possible_cpu(cpu) {
192478b4d6e0SSudeep KarkadaNagesha 		/* too early to use cpu->of_node */
192578b4d6e0SSudeep KarkadaNagesha 		cpun = of_get_cpu_node(cpu, NULL);
192678b4d6e0SSudeep KarkadaNagesha 
192778b4d6e0SSudeep KarkadaNagesha 		if (WARN(!cpun, "Missing cpu device node\n"))
1928ed69bdd8SLorenzo Pieralisi 			continue;
1929ed69bdd8SLorenzo Pieralisi 
1930ed69bdd8SLorenzo Pieralisi 		port = __cci_ace_get_port(cpun, ACE_PORT);
1931ed69bdd8SLorenzo Pieralisi 		if (port < 0)
1932ed69bdd8SLorenzo Pieralisi 			continue;
1933ed69bdd8SLorenzo Pieralisi 
1934ed69bdd8SLorenzo Pieralisi 		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
1935ed69bdd8SLorenzo Pieralisi 	}
1936ed69bdd8SLorenzo Pieralisi 
1937ed69bdd8SLorenzo Pieralisi 	for_each_possible_cpu(cpu) {
1938ed69bdd8SLorenzo Pieralisi 		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
1939ed69bdd8SLorenzo Pieralisi 			"CPU %u does not have an associated CCI port\n",
1940ed69bdd8SLorenzo Pieralisi 			cpu);
1941ed69bdd8SLorenzo Pieralisi 	}
1942ed69bdd8SLorenzo Pieralisi }
1943ed69bdd8SLorenzo Pieralisi /*
1944ed69bdd8SLorenzo Pieralisi  * Functions to enable/disable a CCI interconnect slave port
1945ed69bdd8SLorenzo Pieralisi  *
1946ed69bdd8SLorenzo Pieralisi  * They are called by low-level power management code to disable slave
1947ed69bdd8SLorenzo Pieralisi  * interfaces snoops and DVM broadcast.
1948ed69bdd8SLorenzo Pieralisi  * Since they may execute with cache data allocation disabled and
1949ed69bdd8SLorenzo Pieralisi  * after the caches have been cleaned and invalidated the functions provide
1950ed69bdd8SLorenzo Pieralisi  * no explicit locking since they may run with D-cache disabled, so normal
1951ed69bdd8SLorenzo Pieralisi  * cacheable kernel locks based on ldrex/strex may not work.
1952ed69bdd8SLorenzo Pieralisi  * Locking has to be provided by BSP implementations to ensure proper
1953ed69bdd8SLorenzo Pieralisi  * operations.
1954ed69bdd8SLorenzo Pieralisi  */
1955ed69bdd8SLorenzo Pieralisi 
1956ed69bdd8SLorenzo Pieralisi /**
1957ed69bdd8SLorenzo Pieralisi  * cci_port_control() - function to control a CCI port
1958ed69bdd8SLorenzo Pieralisi  *
1959ed69bdd8SLorenzo Pieralisi  * @port: index of the port to setup
1960ed69bdd8SLorenzo Pieralisi  * @enable: if true enables the port, if false disables it
1961ed69bdd8SLorenzo Pieralisi  */
1962ed69bdd8SLorenzo Pieralisi static void notrace cci_port_control(unsigned int port, bool enable)
1963ed69bdd8SLorenzo Pieralisi {
1964ed69bdd8SLorenzo Pieralisi 	void __iomem *base = ports[port].base;
1965ed69bdd8SLorenzo Pieralisi 
1966ed69bdd8SLorenzo Pieralisi 	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
1967ed69bdd8SLorenzo Pieralisi 	/*
1968ed69bdd8SLorenzo Pieralisi 	 * This function is called from power down procedures
1969ed69bdd8SLorenzo Pieralisi 	 * and must not execute any instruction that might
1970ed69bdd8SLorenzo Pieralisi 	 * cause the processor to be put in a quiescent state
1971ed69bdd8SLorenzo Pieralisi 	 * (eg wfi). Hence, cpu_relax() can not be added to this
1972ed69bdd8SLorenzo Pieralisi 	 * read loop to optimize power, since it might hide possibly
1973ed69bdd8SLorenzo Pieralisi 	 * disruptive operations.
1974ed69bdd8SLorenzo Pieralisi 	 */
1975ed69bdd8SLorenzo Pieralisi 	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
1976ed69bdd8SLorenzo Pieralisi 			;
1977ed69bdd8SLorenzo Pieralisi }
1978ed69bdd8SLorenzo Pieralisi 
1979ed69bdd8SLorenzo Pieralisi /**
1980ed69bdd8SLorenzo Pieralisi  * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
1981ed69bdd8SLorenzo Pieralisi  *			       reference
1982ed69bdd8SLorenzo Pieralisi  *
1983ed69bdd8SLorenzo Pieralisi  * @mpidr: mpidr of the CPU whose CCI port should be disabled
1984ed69bdd8SLorenzo Pieralisi  *
1985ed69bdd8SLorenzo Pieralisi  * Disabling a CCI port for a CPU implies disabling the CCI port
1986ed69bdd8SLorenzo Pieralisi  * controlling that CPU cluster. Code disabling CPU CCI ports
1987ed69bdd8SLorenzo Pieralisi  * must make sure that the CPU running the code is the last active CPU
1988ed69bdd8SLorenzo Pieralisi  * in the cluster ie all other CPUs are quiescent in a low power state.
1989ed69bdd8SLorenzo Pieralisi  *
1990ed69bdd8SLorenzo Pieralisi  * Return:
1991ed69bdd8SLorenzo Pieralisi  *	0 on success
1992ed69bdd8SLorenzo Pieralisi  *	-ENODEV on port look-up failure
1993ed69bdd8SLorenzo Pieralisi  */
1994ed69bdd8SLorenzo Pieralisi int notrace cci_disable_port_by_cpu(u64 mpidr)
1995ed69bdd8SLorenzo Pieralisi {
1996ed69bdd8SLorenzo Pieralisi 	int cpu;
1997ed69bdd8SLorenzo Pieralisi 	bool is_valid;
1998ed69bdd8SLorenzo Pieralisi 	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1999ed69bdd8SLorenzo Pieralisi 		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
2000ed69bdd8SLorenzo Pieralisi 		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
2001ed69bdd8SLorenzo Pieralisi 			cci_port_control(cpu_port[cpu].port, false);
2002ed69bdd8SLorenzo Pieralisi 			return 0;
2003ed69bdd8SLorenzo Pieralisi 		}
2004ed69bdd8SLorenzo Pieralisi 	}
2005ed69bdd8SLorenzo Pieralisi 	return -ENODEV;
2006ed69bdd8SLorenzo Pieralisi }
2007ed69bdd8SLorenzo Pieralisi EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
2008ed69bdd8SLorenzo Pieralisi 
2009ed69bdd8SLorenzo Pieralisi /**
201062158f81SNicolas Pitre  * cci_enable_port_for_self() - enable a CCI port for calling CPU
201162158f81SNicolas Pitre  *
201262158f81SNicolas Pitre  * Enabling a CCI port for the calling CPU implies enabling the CCI
201362158f81SNicolas Pitre  * port controlling that CPU's cluster. Caller must make sure that the
201462158f81SNicolas Pitre  * CPU running the code is the first active CPU in the cluster and all
201562158f81SNicolas Pitre  * other CPUs are quiescent in a low power state  or waiting for this CPU
201662158f81SNicolas Pitre  * to complete the CCI initialization.
201762158f81SNicolas Pitre  *
201862158f81SNicolas Pitre  * Because this is called when the MMU is still off and with no stack,
201962158f81SNicolas Pitre  * the code must be position independent and ideally rely on callee
202062158f81SNicolas Pitre  * clobbered registers only.  To achieve this we must code this function
202162158f81SNicolas Pitre  * entirely in assembler.
202262158f81SNicolas Pitre  *
202362158f81SNicolas Pitre  * On success this returns with the proper CCI port enabled.  In case of
202462158f81SNicolas Pitre  * any failure this never returns as the inability to enable the CCI is
202562158f81SNicolas Pitre  * fatal and there is no possible recovery at this stage.
202662158f81SNicolas Pitre  */
202762158f81SNicolas Pitre asmlinkage void __naked cci_enable_port_for_self(void)
202862158f81SNicolas Pitre {
202962158f81SNicolas Pitre 	asm volatile ("\n"
2030f4902492SArnd Bergmann "	.arch armv7-a\n"
203162158f81SNicolas Pitre "	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value \n"
203262158f81SNicolas Pitre "	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
203362158f81SNicolas Pitre "	adr	r1, 5f \n"
203462158f81SNicolas Pitre "	ldr	r2, [r1] \n"
203562158f81SNicolas Pitre "	add	r1, r1, r2		@ &cpu_port \n"
203662158f81SNicolas Pitre "	add	ip, r1, %[sizeof_cpu_port] \n"
203762158f81SNicolas Pitre 
203862158f81SNicolas Pitre 	/* Loop over the cpu_port array looking for a matching MPIDR */
203962158f81SNicolas Pitre "1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
204062158f81SNicolas Pitre "	cmp	r2, r0 			@ compare MPIDR \n"
204162158f81SNicolas Pitre "	bne	2f \n"
204262158f81SNicolas Pitre 
204362158f81SNicolas Pitre 	/* Found a match, now test port validity */
204462158f81SNicolas Pitre "	ldr	r3, [r1, %[offsetof_cpu_port_port]] \n"
204562158f81SNicolas Pitre "	tst	r3, #"__stringify(PORT_VALID)" \n"
204662158f81SNicolas Pitre "	bne	3f \n"
204762158f81SNicolas Pitre 
204862158f81SNicolas Pitre 	/* no match, loop with the next cpu_port entry */
204962158f81SNicolas Pitre "2:	add	r1, r1, %[sizeof_struct_cpu_port] \n"
205062158f81SNicolas Pitre "	cmp	r1, ip			@ done? \n"
205162158f81SNicolas Pitre "	blo	1b \n"
205262158f81SNicolas Pitre 
205362158f81SNicolas Pitre 	/* CCI port not found -- cheaply try to stall this CPU */
205462158f81SNicolas Pitre "cci_port_not_found: \n"
205562158f81SNicolas Pitre "	wfi \n"
205662158f81SNicolas Pitre "	wfe \n"
205762158f81SNicolas Pitre "	b	cci_port_not_found \n"
205862158f81SNicolas Pitre 
205962158f81SNicolas Pitre 	/* Use matched port index to look up the corresponding ports entry */
206062158f81SNicolas Pitre "3:	bic	r3, r3, #"__stringify(PORT_VALID)" \n"
206162158f81SNicolas Pitre "	adr	r0, 6f \n"
206262158f81SNicolas Pitre "	ldmia	r0, {r1, r2} \n"
206362158f81SNicolas Pitre "	sub	r1, r1, r0 		@ virt - phys \n"
206462158f81SNicolas Pitre "	ldr	r0, [r0, r2] 		@ *(&ports) \n"
206562158f81SNicolas Pitre "	mov	r2, %[sizeof_struct_ace_port] \n"
206662158f81SNicolas Pitre "	mla	r0, r2, r3, r0		@ &ports[index] \n"
206762158f81SNicolas Pitre "	sub	r0, r0, r1		@ virt_to_phys() \n"
206862158f81SNicolas Pitre 
206962158f81SNicolas Pitre 	/* Enable the CCI port */
207062158f81SNicolas Pitre "	ldr	r0, [r0, %[offsetof_port_phys]] \n"
2071fdb07aeeSVictor Kamensky "	mov	r3, %[cci_enable_req]\n"
207262158f81SNicolas Pitre "	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
207362158f81SNicolas Pitre 
207462158f81SNicolas Pitre 	/* poll the status reg for completion */
207562158f81SNicolas Pitre "	adr	r1, 7f \n"
207662158f81SNicolas Pitre "	ldr	r0, [r1] \n"
207762158f81SNicolas Pitre "	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
207862158f81SNicolas Pitre "4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
2079fdb07aeeSVictor Kamensky "	tst	r1, %[cci_control_status_bits] \n"
208062158f81SNicolas Pitre "	bne	4b \n"
208162158f81SNicolas Pitre 
208262158f81SNicolas Pitre "	mov	r0, #0 \n"
208362158f81SNicolas Pitre "	bx	lr \n"
208462158f81SNicolas Pitre 
208562158f81SNicolas Pitre "	.align	2 \n"
208662158f81SNicolas Pitre "5:	.word	cpu_port - . \n"
208762158f81SNicolas Pitre "6:	.word	. \n"
208862158f81SNicolas Pitre "	.word	ports - 6b \n"
208962158f81SNicolas Pitre "7:	.word	cci_ctrl_phys - . \n"
209062158f81SNicolas Pitre 	: :
209162158f81SNicolas Pitre 	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
2092fdb07aeeSVictor Kamensky 	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
2093fdb07aeeSVictor Kamensky 	[cci_control_status_bits] "i" cpu_to_le32(1),
209462158f81SNicolas Pitre #ifndef __ARMEB__
209562158f81SNicolas Pitre 	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
209662158f81SNicolas Pitre #else
209762158f81SNicolas Pitre 	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
209862158f81SNicolas Pitre #endif
209962158f81SNicolas Pitre 	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
210062158f81SNicolas Pitre 	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
210162158f81SNicolas Pitre 	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
210262158f81SNicolas Pitre 	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
210362158f81SNicolas Pitre 
210462158f81SNicolas Pitre 	unreachable();
210562158f81SNicolas Pitre }
210662158f81SNicolas Pitre 
210762158f81SNicolas Pitre /**
2108ed69bdd8SLorenzo Pieralisi  * __cci_control_port_by_device() - function to control a CCI port by device
2109ed69bdd8SLorenzo Pieralisi  *				    reference
2110ed69bdd8SLorenzo Pieralisi  *
2111ed69bdd8SLorenzo Pieralisi  * @dn: device node pointer of the device whose CCI port should be
2112ed69bdd8SLorenzo Pieralisi  *      controlled
2113ed69bdd8SLorenzo Pieralisi  * @enable: if true enables the port, if false disables it
2114ed69bdd8SLorenzo Pieralisi  *
2115ed69bdd8SLorenzo Pieralisi  * Return:
2116ed69bdd8SLorenzo Pieralisi  *	0 on success
2117ed69bdd8SLorenzo Pieralisi  *	-ENODEV on port look-up failure
2118ed69bdd8SLorenzo Pieralisi  */
2119ed69bdd8SLorenzo Pieralisi int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
2120ed69bdd8SLorenzo Pieralisi {
2121ed69bdd8SLorenzo Pieralisi 	int port;
2122ed69bdd8SLorenzo Pieralisi 
2123ed69bdd8SLorenzo Pieralisi 	if (!dn)
2124ed69bdd8SLorenzo Pieralisi 		return -ENODEV;
2125ed69bdd8SLorenzo Pieralisi 
2126ed69bdd8SLorenzo Pieralisi 	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
2127ed69bdd8SLorenzo Pieralisi 	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
2128ed69bdd8SLorenzo Pieralisi 				dn->full_name))
2129ed69bdd8SLorenzo Pieralisi 		return -ENODEV;
2130ed69bdd8SLorenzo Pieralisi 	cci_port_control(port, enable);
2131ed69bdd8SLorenzo Pieralisi 	return 0;
2132ed69bdd8SLorenzo Pieralisi }
2133ed69bdd8SLorenzo Pieralisi EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
2134ed69bdd8SLorenzo Pieralisi 
2135ed69bdd8SLorenzo Pieralisi /**
2136ed69bdd8SLorenzo Pieralisi  * __cci_control_port_by_index() - function to control a CCI port by port index
2137ed69bdd8SLorenzo Pieralisi  *
2138ed69bdd8SLorenzo Pieralisi  * @port: port index previously retrieved with cci_ace_get_port()
2139ed69bdd8SLorenzo Pieralisi  * @enable: if true enables the port, if false disables it
2140ed69bdd8SLorenzo Pieralisi  *
2141ed69bdd8SLorenzo Pieralisi  * Return:
2142ed69bdd8SLorenzo Pieralisi  *	0 on success
2143ed69bdd8SLorenzo Pieralisi  *	-ENODEV on port index out of range
2144ed69bdd8SLorenzo Pieralisi  *	-EPERM if operation carried out on an ACE PORT
2145ed69bdd8SLorenzo Pieralisi  */
2146ed69bdd8SLorenzo Pieralisi int notrace __cci_control_port_by_index(u32 port, bool enable)
2147ed69bdd8SLorenzo Pieralisi {
2148ed69bdd8SLorenzo Pieralisi 	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
2149ed69bdd8SLorenzo Pieralisi 		return -ENODEV;
2150ed69bdd8SLorenzo Pieralisi 	/*
2151ed69bdd8SLorenzo Pieralisi 	 * CCI control for ports connected to CPUS is extremely fragile
2152ed69bdd8SLorenzo Pieralisi 	 * and must be made to go through a specific and controlled
2153ed69bdd8SLorenzo Pieralisi 	 * interface (ie cci_disable_port_by_cpu(); control by general purpose
2154ed69bdd8SLorenzo Pieralisi 	 * indexing is therefore disabled for ACE ports.
2155ed69bdd8SLorenzo Pieralisi 	 */
2156ed69bdd8SLorenzo Pieralisi 	if (ports[port].type == ACE_PORT)
2157ed69bdd8SLorenzo Pieralisi 		return -EPERM;
2158ed69bdd8SLorenzo Pieralisi 
2159ed69bdd8SLorenzo Pieralisi 	cci_port_control(port, enable);
2160ed69bdd8SLorenzo Pieralisi 	return 0;
2161ed69bdd8SLorenzo Pieralisi }
2162ed69bdd8SLorenzo Pieralisi EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
2163ed69bdd8SLorenzo Pieralisi 
2164ed69bdd8SLorenzo Pieralisi static const struct of_device_id arm_cci_ctrl_if_matches[] = {
2165ed69bdd8SLorenzo Pieralisi 	{.compatible = "arm,cci-400-ctrl-if", },
2166ed69bdd8SLorenzo Pieralisi 	{},
2167ed69bdd8SLorenzo Pieralisi };
2168ed69bdd8SLorenzo Pieralisi 
2169f6b9e83cSSuzuki K. Poulose static int cci_probe_ports(struct device_node *np)
2170ed69bdd8SLorenzo Pieralisi {
2171ed69bdd8SLorenzo Pieralisi 	struct cci_nb_ports const *cci_config;
2172ed69bdd8SLorenzo Pieralisi 	int ret, i, nb_ace = 0, nb_ace_lite = 0;
2173f6b9e83cSSuzuki K. Poulose 	struct device_node *cp;
217462158f81SNicolas Pitre 	struct resource res;
2175ed69bdd8SLorenzo Pieralisi 	const char *match_str;
2176ed69bdd8SLorenzo Pieralisi 	bool is_ace;
2177ed69bdd8SLorenzo Pieralisi 
2178896ddd60SAbhilash Kesavan 
2179ed69bdd8SLorenzo Pieralisi 	cci_config = of_match_node(arm_cci_matches, np)->data;
2180ed69bdd8SLorenzo Pieralisi 	if (!cci_config)
2181ed69bdd8SLorenzo Pieralisi 		return -ENODEV;
2182ed69bdd8SLorenzo Pieralisi 
2183ed69bdd8SLorenzo Pieralisi 	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
2184ed69bdd8SLorenzo Pieralisi 
21857c762036SLorenzo Pieralisi 	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
2186ed69bdd8SLorenzo Pieralisi 	if (!ports)
2187ed69bdd8SLorenzo Pieralisi 		return -ENOMEM;
2188ed69bdd8SLorenzo Pieralisi 
2189ed69bdd8SLorenzo Pieralisi 	for_each_child_of_node(np, cp) {
2190ed69bdd8SLorenzo Pieralisi 		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
2191ed69bdd8SLorenzo Pieralisi 			continue;
2192ed69bdd8SLorenzo Pieralisi 
2193801f33beSLorenzo Pieralisi 		if (!of_device_is_available(cp))
2194801f33beSLorenzo Pieralisi 			continue;
2195801f33beSLorenzo Pieralisi 
2196ed69bdd8SLorenzo Pieralisi 		i = nb_ace + nb_ace_lite;
2197ed69bdd8SLorenzo Pieralisi 
2198ed69bdd8SLorenzo Pieralisi 		if (i >= nb_cci_ports)
2199ed69bdd8SLorenzo Pieralisi 			break;
2200ed69bdd8SLorenzo Pieralisi 
2201ed69bdd8SLorenzo Pieralisi 		if (of_property_read_string(cp, "interface-type",
2202ed69bdd8SLorenzo Pieralisi 					&match_str)) {
2203ed69bdd8SLorenzo Pieralisi 			WARN(1, "node %s missing interface-type property\n",
2204ed69bdd8SLorenzo Pieralisi 				  cp->full_name);
2205ed69bdd8SLorenzo Pieralisi 			continue;
2206ed69bdd8SLorenzo Pieralisi 		}
2207ed69bdd8SLorenzo Pieralisi 		is_ace = strcmp(match_str, "ace") == 0;
2208ed69bdd8SLorenzo Pieralisi 		if (!is_ace && strcmp(match_str, "ace-lite")) {
2209ed69bdd8SLorenzo Pieralisi 			WARN(1, "node %s containing invalid interface-type property, skipping it\n",
2210ed69bdd8SLorenzo Pieralisi 					cp->full_name);
2211ed69bdd8SLorenzo Pieralisi 			continue;
2212ed69bdd8SLorenzo Pieralisi 		}
2213ed69bdd8SLorenzo Pieralisi 
221462158f81SNicolas Pitre 		ret = of_address_to_resource(cp, 0, &res);
221562158f81SNicolas Pitre 		if (!ret) {
221662158f81SNicolas Pitre 			ports[i].base = ioremap(res.start, resource_size(&res));
221762158f81SNicolas Pitre 			ports[i].phys = res.start;
221862158f81SNicolas Pitre 		}
221962158f81SNicolas Pitre 		if (ret || !ports[i].base) {
2220ed69bdd8SLorenzo Pieralisi 			WARN(1, "unable to ioremap CCI port %d\n", i);
2221ed69bdd8SLorenzo Pieralisi 			continue;
2222ed69bdd8SLorenzo Pieralisi 		}
2223ed69bdd8SLorenzo Pieralisi 
2224ed69bdd8SLorenzo Pieralisi 		if (is_ace) {
2225ed69bdd8SLorenzo Pieralisi 			if (WARN_ON(nb_ace >= cci_config->nb_ace))
2226ed69bdd8SLorenzo Pieralisi 				continue;
2227ed69bdd8SLorenzo Pieralisi 			ports[i].type = ACE_PORT;
2228ed69bdd8SLorenzo Pieralisi 			++nb_ace;
2229ed69bdd8SLorenzo Pieralisi 		} else {
2230ed69bdd8SLorenzo Pieralisi 			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
2231ed69bdd8SLorenzo Pieralisi 				continue;
2232ed69bdd8SLorenzo Pieralisi 			ports[i].type = ACE_LITE_PORT;
2233ed69bdd8SLorenzo Pieralisi 			++nb_ace_lite;
2234ed69bdd8SLorenzo Pieralisi 		}
2235ed69bdd8SLorenzo Pieralisi 		ports[i].dn = cp;
2236ed69bdd8SLorenzo Pieralisi 	}
2237ed69bdd8SLorenzo Pieralisi 
2238801f33beSLorenzo Pieralisi 	/*
2239801f33beSLorenzo Pieralisi 	 * If there is no CCI port that is under kernel control
2240801f33beSLorenzo Pieralisi 	 * return early and report probe status.
2241801f33beSLorenzo Pieralisi 	 */
2242801f33beSLorenzo Pieralisi 	if (!nb_ace && !nb_ace_lite)
2243801f33beSLorenzo Pieralisi 		return -ENODEV;
2244801f33beSLorenzo Pieralisi 
2245ed69bdd8SLorenzo Pieralisi 	 /* initialize a stashed array of ACE ports to speed-up look-up */
2246ed69bdd8SLorenzo Pieralisi 	cci_ace_init_ports();
2247ed69bdd8SLorenzo Pieralisi 
2248ed69bdd8SLorenzo Pieralisi 	/*
2249ed69bdd8SLorenzo Pieralisi 	 * Multi-cluster systems may need this data when non-coherent, during
2250ed69bdd8SLorenzo Pieralisi 	 * cluster power-up/power-down. Make sure it reaches main memory.
2251ed69bdd8SLorenzo Pieralisi 	 */
2252ed69bdd8SLorenzo Pieralisi 	sync_cache_w(&cci_ctrl_base);
225362158f81SNicolas Pitre 	sync_cache_w(&cci_ctrl_phys);
2254ed69bdd8SLorenzo Pieralisi 	sync_cache_w(&ports);
2255ed69bdd8SLorenzo Pieralisi 	sync_cache_w(&cpu_port);
2256ed69bdd8SLorenzo Pieralisi 	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
2257ed69bdd8SLorenzo Pieralisi 	pr_info("ARM CCI driver probed\n");
2258f6b9e83cSSuzuki K. Poulose 
2259ed69bdd8SLorenzo Pieralisi 	return 0;
2260f6b9e83cSSuzuki K. Poulose }
2261ee8e5d5fSSuzuki K. Poulose #else /* !CONFIG_ARM_CCI400_PORT_CTRL */
2262ee8e5d5fSSuzuki K. Poulose static inline int cci_probe_ports(struct device_node *np)
2263ee8e5d5fSSuzuki K. Poulose {
2264ee8e5d5fSSuzuki K. Poulose 	return 0;
2265ee8e5d5fSSuzuki K. Poulose }
2266ee8e5d5fSSuzuki K. Poulose #endif /* CONFIG_ARM_CCI400_PORT_CTRL */
2267ed69bdd8SLorenzo Pieralisi 
2268f6b9e83cSSuzuki K. Poulose static int cci_probe(void)
2269f6b9e83cSSuzuki K. Poulose {
2270f6b9e83cSSuzuki K. Poulose 	int ret;
2271f6b9e83cSSuzuki K. Poulose 	struct device_node *np;
2272f6b9e83cSSuzuki K. Poulose 	struct resource res;
2273ed69bdd8SLorenzo Pieralisi 
2274f6b9e83cSSuzuki K. Poulose 	np = of_find_matching_node(NULL, arm_cci_matches);
2275f6b9e83cSSuzuki K. Poulose 	if(!np || !of_device_is_available(np))
2276f6b9e83cSSuzuki K. Poulose 		return -ENODEV;
2277f6b9e83cSSuzuki K. Poulose 
2278f6b9e83cSSuzuki K. Poulose 	ret = of_address_to_resource(np, 0, &res);
2279f6b9e83cSSuzuki K. Poulose 	if (!ret) {
2280f6b9e83cSSuzuki K. Poulose 		cci_ctrl_base = ioremap(res.start, resource_size(&res));
2281f6b9e83cSSuzuki K. Poulose 		cci_ctrl_phys =	res.start;
2282f6b9e83cSSuzuki K. Poulose 	}
2283f6b9e83cSSuzuki K. Poulose 	if (ret || !cci_ctrl_base) {
2284f6b9e83cSSuzuki K. Poulose 		WARN(1, "unable to ioremap CCI ctrl\n");
2285f6b9e83cSSuzuki K. Poulose 		return -ENXIO;
2286f6b9e83cSSuzuki K. Poulose 	}
2287f6b9e83cSSuzuki K. Poulose 
2288f6b9e83cSSuzuki K. Poulose 	return cci_probe_ports(np);
2289ed69bdd8SLorenzo Pieralisi }
2290ed69bdd8SLorenzo Pieralisi 
2291ed69bdd8SLorenzo Pieralisi static int cci_init_status = -EAGAIN;
2292ed69bdd8SLorenzo Pieralisi static DEFINE_MUTEX(cci_probing);
2293ed69bdd8SLorenzo Pieralisi 
2294b91c8f28SPunit Agrawal static int cci_init(void)
2295ed69bdd8SLorenzo Pieralisi {
2296ed69bdd8SLorenzo Pieralisi 	if (cci_init_status != -EAGAIN)
2297ed69bdd8SLorenzo Pieralisi 		return cci_init_status;
2298ed69bdd8SLorenzo Pieralisi 
2299ed69bdd8SLorenzo Pieralisi 	mutex_lock(&cci_probing);
2300ed69bdd8SLorenzo Pieralisi 	if (cci_init_status == -EAGAIN)
2301ed69bdd8SLorenzo Pieralisi 		cci_init_status = cci_probe();
2302ed69bdd8SLorenzo Pieralisi 	mutex_unlock(&cci_probing);
2303ed69bdd8SLorenzo Pieralisi 	return cci_init_status;
2304ed69bdd8SLorenzo Pieralisi }
2305ed69bdd8SLorenzo Pieralisi 
2306ed69bdd8SLorenzo Pieralisi /*
2307ed69bdd8SLorenzo Pieralisi  * To sort out early init calls ordering a helper function is provided to
2308ed69bdd8SLorenzo Pieralisi  * check if the CCI driver has beed initialized. Function check if the driver
2309ed69bdd8SLorenzo Pieralisi  * has been initialized, if not it calls the init function that probes
2310ed69bdd8SLorenzo Pieralisi  * the driver and updates the return value.
2311ed69bdd8SLorenzo Pieralisi  */
2312b91c8f28SPunit Agrawal bool cci_probed(void)
2313ed69bdd8SLorenzo Pieralisi {
2314ed69bdd8SLorenzo Pieralisi 	return cci_init() == 0;
2315ed69bdd8SLorenzo Pieralisi }
2316ed69bdd8SLorenzo Pieralisi EXPORT_SYMBOL_GPL(cci_probed);
2317ed69bdd8SLorenzo Pieralisi 
2318ed69bdd8SLorenzo Pieralisi early_initcall(cci_init);
2319b91c8f28SPunit Agrawal core_initcall(cci_platform_init);
2320ed69bdd8SLorenzo Pieralisi MODULE_LICENSE("GPL");
2321ed69bdd8SLorenzo Pieralisi MODULE_DESCRIPTION("ARM CCI support");
2322