/*
 * Performance event support - PowerPC classic/server specific definitions.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
#include <uapi/asm/perf_event.h>

/* Update perf_event_print_debug() if this changes */
#define MAX_HWEVENTS		8
#define MAX_EVENT_ALTERNATIVES	8
#define MAX_LIMITED_HWCOUNTERS	2

struct perf_event;

/*
 * This struct provides the constants and functions needed to
 * describe the PMU on a particular POWER-family CPU.
 */
struct power_pmu {
	const char	*name;
	int		n_counter;
	int		max_alternatives;
	unsigned long	add_fields;
	unsigned long	test_adder;
	int		(*compute_mmcr)(u64 events[], int n_ev,
				unsigned int hwc[], unsigned long mmcr[],
				struct perf_event *pevents[]);
	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
				unsigned long *valp);
	int		(*get_alternatives)(u64 event_id, unsigned int flags,
				u64 alt[]);
	void		(*get_mem_data_src)(union perf_mem_data_src *dsrc,
				u32 flags, struct pt_regs *regs);
	void		(*get_mem_weight)(u64 *weight);
	u64		(*bhrb_filter_map)(u64 branch_sample_type);
	void		(*config_bhrb)(u64 pmu_bhrb_filter);
	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
	int		(*limited_pmc_event)(u64 event_id);
	u32		flags;
	const struct attribute_group	**attr_groups;
	int		n_generic;
	int		*generic_events;
	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
			       [PERF_COUNT_HW_CACHE_OP_MAX]
			       [PERF_COUNT_HW_CACHE_RESULT_MAX];

	/* BHRB entries in the PMU */
	int		bhrb_nr;
};

/*
 * Values for power_pmu.flags
 */
#define PPMU_LIMITED_PMC5_6	0x00000001 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR		0x00000002 /* uses alternate posn for SIPR/HV */
#define PPMU_NO_SIPR		0x00000004 /* no SIPR/HV in MMCRA at all */
#define PPMU_NO_CONT_SAMPLING	0x00000008 /* no continuous sampling */
#define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
#define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER		0x00000040 /* Has SIER */
#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
#define PPMU_NO_SIAR		0x00000100 /* Do not use SIAR */

/*
 * Values for flags to get_alternatives()
 */
#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */

extern int register_power_pmu(struct power_pmu *);

struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long int read_bhrb(int n);

/*
 * Only override the default definitions in include/linux/perf_event.h
 * if we have hardware PMU support.
 */
#ifdef CONFIG_PPC_PERF_CTRS
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#endif

/*
 * The power_pmu.get_constraint function returns a 32/64-bit value and
 * a 32/64-bit mask that express the constraints between this event_id and
 * other events.
 *
 * The value and mask are divided up into (non-overlapping) bitfields
 * of three different types:
 *
 * Select field: this expresses the constraint that some set of bits
 * in MMCR* needs to be set to a specific value for this event_id.  For a
 * select field, the mask contains 1s in every bit of the field, and
 * the value contains a unique value for each possible setting of the
 * MMCR* bits.  The constraint checking code will ensure that two events
 * that set the same field in their masks have the same value in their
 * value dwords.
 *
 * Add field: this expresses the constraint that there can be at most
 * N events in a particular class.  A field of k bits can be used for
 * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
 * set (and the other bits 0), and the value has only the least significant
 * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
 * in the struct power_pmu for this processor come into play.  The
 * add_fields value contains 1 in the LSB of the field, and the
 * test_adder contains 2^(k-1) - 1 - N in the field.
 *
 * NAND field: this expresses the constraint that you may not have events
 * in all of a set of classes.  (For example, on PPC970, you can't select
 * events from the FPU, ISU and IDU simultaneously, although any two are
 * possible.)  For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits.  The mask has
 * only the most-significant bit set, and the value has only the bit
 * for the event_id's class set.  The test_adder has the least significant
 * bit set in the field.
 *
 * If an event_id is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */
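/*
 * Illustrative sketch (not compiled): a hypothetical get_constraint()
 * implementation showing the add-field and select-field encodings
 * described above.  The field positions, widths, counter count and
 * event layout below are invented for the example and do not
 * correspond to any real POWER PMU.
 */
#if 0
static int example_get_constraint(u64 event_id, unsigned long *mskp,
				  unsigned long *valp)
{
	unsigned long mask, value;
	int unit = (event_id >> 8) & 0xf;	/* hypothetical unit-select code */

	/*
	 * Add field in bits 0-3 (k = 4), counting events that each need
	 * one of the N = 6 programmable counters: the mask has only the
	 * MSB of the field (bit 3) set, the value only the LSB (bit 0).
	 * The matching struct power_pmu would carry 0x1 in this field of
	 * add_fields and 2^(k-1) - 1 - N = 7 - 6 = 1 in test_adder.
	 */
	mask  = 0x8;
	value = 0x1;

	/*
	 * Select field in bits 8-11: every scheduled event that uses this
	 * field must agree on its value, so the mask covers the whole
	 * field and the value carries the required unit-select code.
	 */
	if (unit) {
		mask  |= 0xf00ul;
		value |= (unsigned long)unit << 8;
	}

	*mskp = mask;
	*valp = value;
	return 0;
}
#endif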
99 * 100 * The value and mask are divided up into (non-overlapping) bitfields 101 * of three different types: 102 * 103 * Select field: this expresses the constraint that some set of bits 104 * in MMCR* needs to be set to a specific value for this event_id. For a 105 * select field, the mask contains 1s in every bit of the field, and 106 * the value contains a unique value for each possible setting of the 107 * MMCR* bits. The constraint checking code will ensure that two events 108 * that set the same field in their masks have the same value in their 109 * value dwords. 110 * 111 * Add field: this expresses the constraint that there can be at most 112 * N events in a particular class. A field of k bits can be used for 113 * N <= 2^(k-1) - 1. The mask has the most significant bit of the field 114 * set (and the other bits 0), and the value has only the least significant 115 * bit of the field set. In addition, the 'add_fields' and 'test_adder' 116 * in the struct power_pmu for this processor come into play. The 117 * add_fields value contains 1 in the LSB of the field, and the 118 * test_adder contains 2^(k-1) - 1 - N in the field. 119 * 120 * NAND field: this expresses the constraint that you may not have events 121 * in all of a set of classes. (For example, on PPC970, you can't select 122 * events from the FPU, ISU and IDU simultaneously, although any two are 123 * possible.) For N classes, the field is N+1 bits wide, and each class 124 * is assigned one bit from the least-significant N bits. The mask has 125 * only the most-significant bit set, and the value has only the bit 126 * for the event_id's class set. The test_adder has the least significant 127 * bit set in the field. 128 * 129 * If an event_id is not subject to the constraint expressed by a particular 130 * field, then it will have 0 in both the mask and value for that field. 131 */ 132 133 extern ssize_t power_events_sysfs_show(struct device *dev, 134 struct device_attribute *attr, char *page); 135 136 /* 137 * EVENT_VAR() is same as PMU_EVENT_VAR with a suffix. 138 * 139 * Having a suffix allows us to have aliases in sysfs - eg: the generic 140 * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and 141 * 'PM_CYC' where the latter is the name by which the event is known in 142 * POWER CPU specification. 143 * 144 * Similarly, some hardware and cache events use the same event code. Eg. 145 * on POWER8, both "cache-references" and "L1-dcache-loads" events refer 146 * to the same event, PM_LD_REF_L1. The suffix, allows us to have two 147 * sysfs objects for the same event and thus two entries/aliases in sysfs. 148 */ 149 #define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix 150 #define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr 151 152 #define EVENT_ATTR(_name, _id, _suffix) \ 153 PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), _id, \ 154 power_events_sysfs_show) 155 156 #define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) 157 #define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) 158 159 #define CACHE_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _c) 160 #define CACHE_EVENT_PTR(_id) EVENT_PTR(_id, _c) 161 162 #define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p) 163 #define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) 164