/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/include/asm/pmu.h
 *
 *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>

#ifdef CONFIG_ARM_PMU

/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

/*
 * ARM PMU hw_event flags
 */
/* Event uses a 64bit counter */
#define ARMPMU_EVT_64BIT		1
/* Event uses a 47bit counter */
#define ARMPMU_EVT_47BIT		2

#define HW_OP_UNSUPPORTED		0xFFFF
#define C(_x)				PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}

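/*
 * Illustrative sketch, not part of this header: a PMU driver typically uses
 * the initializers above to mark every generic event as unsupported and then
 * fills in only the encodings its hardware actually provides. The table
 * names and the event encodings below are hypothetical.
 *
 *	static const unsigned my_pmu_perf_map[PERF_COUNT_HW_MAX] = {
 *		PERF_MAP_ALL_UNSUPPORTED,
 *		[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,
 *	};
 *
 *	static const unsigned my_pmu_cache_map[PERF_COUNT_HW_CACHE_MAX]
 *					      [PERF_COUNT_HW_CACHE_OP_MAX]
 *					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 *		PERF_CACHE_MAP_ALL_UNSUPPORTED,
 *		[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= 0x04,
 *	};
 */
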
/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t		pmu_lock;

	/*
	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu		*percpu_pmu;

	int irq;
};

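/*
 * Illustrative sketch, not part of this header: a driver's get_event_idx
 * callback usually scans used_mask for a free counter and claims it with an
 * atomic test-and-set, while clear_event_idx releases the bit again. The
 * function name below is hypothetical.
 *
 *	static int my_pmu_get_event_idx(struct pmu_hw_events *hw_events,
 *					struct perf_event *event)
 *	{
 *		int idx;
 *
 *		for (idx = 0; idx < ARMPMU_MAX_HWEVENTS; idx++) {
 *			if (!test_and_set_bit(idx, hw_events->used_mask))
 *				return idx;
 *		}
 *
 *		return -EAGAIN;
 *	}
 */
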
enum armpmu_attr_groups {
	ARMPMU_ATTR_GROUP_COMMON,
	ARMPMU_ATTR_GROUP_EVENTS,
	ARMPMU_ATTR_GROUP_FORMATS,
	ARMPMU_ATTR_GROUP_CAPS,
	ARMPMU_NR_ATTR_GROUPS
};

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	supported_cpus;
	char		*name;
	int		pmuver;
	irqreturn_t	(*handle_irq)(struct arm_pmu *pmu);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u64		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u64 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*map_event)(struct perf_event *event);
	int		(*filter_match)(struct perf_event *event);
	int		num_events;
	bool		secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS		0x40
	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE	0x4000
	DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
	struct platform_device	*plat_device;
	struct pmu_hw_events	__percpu *hw_events;
	struct hlist_node	node;
	struct notifier_block	cpu_pm_nb;
	/* the attr_groups array must be NULL-terminated */
	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
	/* store the PMMIR_EL1 to expose slots */
	u64		reg_pmmir;

	/* Only to be used by ACPI probing code */
	unsigned long acpi_cpuid;
};

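/*
 * Illustrative sketch, not part of this header: an armpmu_init_fn style
 * probe routine fills in the struct arm_pmu callbacks before the core code
 * registers the PMU with perf. All my_pmu_* names below are hypothetical,
 * and a real driver would also set num_events from the hardware.
 *
 *	static int my_pmu_init(struct arm_pmu *cpu_pmu)
 *	{
 *		cpu_pmu->name		= "my_pmu";
 *		cpu_pmu->handle_irq	= my_pmu_handle_irq;
 *		cpu_pmu->enable		= my_pmu_enable_event;
 *		cpu_pmu->disable	= my_pmu_disable_event;
 *		cpu_pmu->get_event_idx	= my_pmu_get_event_idx;
 *		cpu_pmu->clear_event_idx = my_pmu_clear_event_idx;
 *		cpu_pmu->read_counter	= my_pmu_read_counter;
 *		cpu_pmu->write_counter	= my_pmu_write_counter;
 *		cpu_pmu->start		= my_pmu_start;
 *		cpu_pmu->stop		= my_pmu_stop;
 *		cpu_pmu->map_event	= my_pmu_map_event;
 *		return 0;
 *	}
 */
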
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

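/*
 * Illustrative sketch, not part of this header: on a counter overflow, a
 * driver's handle_irq implementation typically folds the hardware count into
 * the perf event with armpmu_event_update() and re-arms the counter with
 * armpmu_event_set_period() before dealing with the sample. Everything named
 * my_pmu_* below is hypothetical, and sample handling is omitted.
 *
 *	static irqreturn_t my_pmu_handle_irq(struct arm_pmu *cpu_pmu)
 *	{
 *		struct pmu_hw_events *hw_events = this_cpu_ptr(cpu_pmu->hw_events);
 *		int idx;
 *
 *		for (idx = 0; idx < cpu_pmu->num_events; idx++) {
 *			struct perf_event *event = hw_events->events[idx];
 *
 *			if (!event || !my_pmu_counter_has_overflowed(idx))
 *				continue;
 *
 *			armpmu_event_update(event);
 *			armpmu_event_set_period(event);
 *		}
 *
 *		return IRQ_HANDLED;
 *	}
 */
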
int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);

typedef int (*armpmu_init_fn)(struct arm_pmu *);

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	armpmu_init_fn init;
};

#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);

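/*
 * Illustrative sketch, not part of this header: a 32-bit platform driver can
 * describe which init function to use for each CPU type with a
 * pmu_probe_info table built from the PMU_PROBE helpers (terminated by an
 * empty entry) and hand it to arm_pmu_device_probe() from its platform
 * driver probe routine. The my_* names are hypothetical;
 * ARM_CPU_PART_CORTEX_A9 is just an example part number from <asm/cputype.h>.
 *
 *	static const struct pmu_probe_info my_pmu_probe_table[] = {
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, my_a9_pmu_init),
 *		{ },
 *	};
 *
 *	static int my_pmu_device_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, my_pmu_of_match,
 *					    my_pmu_probe_table);
 *	}
 */
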
#ifdef CONFIG_ACPI
int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
#else
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif

#ifdef CONFIG_KVM
void kvm_host_pmu_init(struct arm_pmu *pmu);
#else
#define kvm_host_pmu_init(x)	do { } while(0)
#endif

/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irq(int irq, int cpu);
void armpmu_free_irq(int irq, int cpu);

#define ARMV8_PMU_PDEV_NAME "armv8-pmu"

#endif /* CONFIG_ARM_PMU */

#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"

#endif /* __ARM_PMU_H__ */