// SPDX-License-Identifier: GPL-2.0-only
// arch/x86/events/amd/core.c (revision 55ed6c47)
#include <linux/perf_event.h>
#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "../perf_event.h"

static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;

/* AMD Event 0xFFF: Merge.  Used with Large Increment per Cycle events */
#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
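
/*
 * For illustration (derived from the encoding used by amd_get_event_code()
 * below): AMD64 event selects split the 12-bit event code across config
 * bits [7:0] and [35:32], so event 0xFFF encodes as
 * (0xFULL << 32) | 0xFFULL == 0x0000000F000000FF, and OR-ing in
 * ARCH_PERFMON_EVENTSEL_ENABLE arms the merge event.
 */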

/* PMC Enable and Overflow bits for PerfCntrGlobal* registers */
static u64 amd_pmu_global_cntr_mask __read_mostly;

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
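
/*
 * A worked reading of the table above (assuming the usual AMD encoding of
 * (umask << 8) | event): 0x0141 selects event 0x41 with unit mask 0x01,
 * i.e. "Data Cache Misses". An entry of 0 means the generic event is not
 * supported, and -1 marks an op/result combination that makes no sense
 * for that cache level.
 */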

static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
		[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
		[C(RESULT_MISS)]   = 0,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches  */
		[C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses   */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
		[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
		[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr.      */
		[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI    */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};

static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00a9,
};

static u64 amd_pmu_event_map(int hw_event)
{
	if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
		return amd_zen2_perfmon_event_map[hw_event];

	if (cpu_feature_enabled(X86_FEATURE_ZEN1))
		return amd_zen1_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}
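
/*
 * Worked example (assuming MSR_K7_EVNTSEL0 == 0xc0010000 and
 * MSR_F15H_PERF_CTL == 0xc0010200, per the comment above): on a legacy
 * CPU, index 2 yields offset 2, i.e. event select 0xc0010002; with
 * X86_FEATURE_PERFCTR_CORE, index 2 yields offset 4, i.e. event select
 * 0xc0010204 and its interleaved counter at 0xc0010205.
 */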

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
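
/*
 * Example: a raw config of 0x1000000e0 (bit 32 set, low byte 0xe0) yields
 * ((0x1000000e0 >> 24) & 0x0f00) | 0xe0 == 0x1e0, i.e. the high nibble of
 * the event code comes from config bits [35:32] and the low byte from
 * bits [7:0].
 */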

static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
{
	if (!(x86_pmu.flags & PMU_FL_PAIR))
		return false;

	switch (amd_get_event_code(hwc)) {
	case 0x003:	return true;	/* Retired SSE/AVX FLOPs */
	default:	return false;
	}
}
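
/*
 * Illustrative note: event 0x003 (Retired SSE/AVX FLOPs) uses Large
 * Increment per Cycle counting, so when it is scheduled the adjacent
 * counter is presumably programmed with AMD_MERGE_EVENT_ENABLE (see
 * above) and the event is flagged PERF_X86_EVENT_PAIR in
 * amd_core_hw_config() below.
 */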

DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
		event->hw.flags |= PERF_X86_EVENT_PAIR;

	if (has_branch_stack(event))
		return static_call(amd_pmu_branch_hw_config)(event);

	return 0;
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}
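
/*
 * Example: the 0xe0 mask matches low-byte event codes 0xEx and 0xFx, so
 * 0xb8e9 (CPU Request to Memory) from the NODE entries above is treated
 * as a NorthBridge event.
 */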

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return forward_event_to_ibs(event);

	if (has_branch_stack(event) && !x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}
 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
  * of a package. Refer to BKDG section 3.12
  *
  * NB events are events measuring L3 cache and Hypertransport
  * traffic. They are identified by an event code >= 0xe00.
  * They measure events on the NorthBridge, which is shared
  * by all cores on a package. NB events are counted on a
  * shared set of counters. When a NB event is programmed
  * in a counter, the data actually comes from a shared
  * counter. Thus, access to those counters needs to be
  * synchronized.
  *
  * We implement the synchronization such that no two cores
  * can be measuring NB events using the same counters. Thus,
  * we maintain a per-NB allocation table. The available slot
  * is propagated using the event_constraint structure.
  *
  * We provide only one choice for each NB event based on
  * the fact that only NB events have restrictions. Consequently,
  * if a counter is available, there is a guarantee the NB event
  * will be assigned to it. If no slot is available, an empty
  * constraint is returned and scheduling will eventually fail
  * for this event.
  *
  * Note that all cores attached to the same NB compete for the same
  * counters to host NB events; this is why we use atomic ops. Some
  * multi-chip CPUs may have more than one NB.
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
  * calling __amd_put_nb_event_constraints()
  *
  * Non-NB events are not impacted by this restriction.
  */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}
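
/*
 * A brief walk-through of the claim loop above (illustrative): if another
 * CPU already owns nb->owners[idx], the cmpxchg() fails (old != NULL and
 * old != event), the loop moves on, and the event lands in the first slot
 * whose cmpxchg() succeeds. If every permitted slot is taken, new stays
 * -1 and &emptyconstraint makes scheduling fail for this event.
 */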

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

typedef void (amd_pmu_branch_reset_t)(void);
DEFINE_STATIC_CALL_NULL(amd_pmu_branch_reset, amd_pmu_branch_reset_t);

static void amd_pmu_cpu_reset(int cpu)
{
	if (x86_pmu.lbr_nr)
		static_call(amd_pmu_branch_reset)();

	if (x86_pmu.version < 2)
		return;

	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/*
	 * Clear freeze and overflow bits i.e. PerfCntrGlobalStatus.LbrFreeze
	 * and PerfCntrGlobalStatus.PerfCntrOvfl
	 */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
	       GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	cpuc->lbr_sel = kzalloc_node(sizeof(struct er_account), GFP_KERNEL,
				     cpu_to_node(cpu));
	if (!cpuc->lbr_sel)
		return -ENOMEM;

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (cpuc->amd_nb)
		return 0;

	kfree(cpuc->lbr_sel);
	cpuc->lbr_sel = NULL;

	return -ENOMEM;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
	amd_pmu_cpu_reset(cpu);

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = topology_die_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	kfree(cpuhw->lbr_sel);
	cpuhw->lbr_sel = NULL;

	if (!x86_pmu.amd_nb_constraints)
		return;

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static inline void amd_pmu_set_global_ctl(u64 ctl)
{
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
}

static inline u64 amd_pmu_get_global_status(void)
{
	u64 status;

	/* PerfCntrGlobalStatus is read-only */
	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);

	return status;
}

static inline void amd_pmu_ack_global_status(u64 status)
{
	/*
	 * PerfCntrGlobalStatus is read-only but an overflow acknowledgment
	 * mechanism exists; writing 1 to a bit in PerfCntrGlobalStatusClr
	 * clears the same bit in PerfCntrGlobalStatus
	 */

	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
}

static bool amd_pmu_test_overflow_topbit(int idx)
{
	u64 counter;

	rdmsrl(x86_pmu_event_addr(idx), counter);

	return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
}
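
/*
 * Example: with 48-bit counters (cntval_bits == 48), a counter that has
 * just overflowed wraps around to a small value with bit 47 clear, so the
 * test above keeps reporting "overflow pending" until the NMI handler
 * reprograms the counter.
 */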

static bool amd_pmu_test_overflow_status(int idx)
{
	return amd_pmu_get_global_status() & BIT_ULL(idx);
}

DEFINE_STATIC_CALL(amd_pmu_test_overflow, amd_pmu_test_overflow_topbit);

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT	50

static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * forever...
	 */
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		if (!static_call(amd_pmu_test_overflow)(idx))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}
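
/*
 * With OVERFLOW_WAIT_COUNT == 50 and udelay(1), the loop above bounds the
 * wait at roughly 50 microseconds per overflowed counter.
 */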

static void amd_pmu_check_overflow(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}

static void amd_pmu_enable_event(struct perf_event *event)
{
	x86_pmu_enable_event(event);
}

static void amd_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	amd_brs_enable_all();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		/* only activate events which are marked as active */
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_enable_event(cpuc->events[idx]);
	}
}

static void amd_pmu_v2_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Testing cpu_hw_events.enabled should be skipped in this case unlike
	 * in x86_pmu_enable_event().
	 *
	 * Since cpu_hw_events.enabled is set only after returning from
	 * x86_pmu_start(), the PMCs must be programmed and kept ready.
	 * Counting starts only after x86_pmu_enable_all() is called.
	 */
	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

static __always_inline void amd_pmu_core_enable_all(void)
{
	amd_pmu_set_global_ctl(amd_pmu_global_cntr_mask);
}

static void amd_pmu_v2_enable_all(int added)
{
	amd_pmu_lbr_enable_all();
	amd_pmu_core_enable_all();
}

static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI. And the NMI latency support
	 * below will take care of any pending NMI that might have been
	 * generated by the overflow.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}

static void amd_pmu_disable_all(void)
{
	amd_brs_disable_all();
	x86_pmu_disable_all();
	amd_pmu_check_overflow();
}

static __always_inline void amd_pmu_core_disable_all(void)
{
	amd_pmu_set_global_ctl(0);
}

static void amd_pmu_v2_disable_all(void)
{
	amd_pmu_core_disable_all();
	amd_pmu_lbr_disable_all();
	amd_pmu_check_overflow();
}

DEFINE_STATIC_CALL_NULL(amd_pmu_branch_add, *x86_pmu.add);

static void amd_pmu_add_event(struct perf_event *event)
{
	if (needs_branch_stack(event))
		static_call(amd_pmu_branch_add)(event);
}

DEFINE_STATIC_CALL_NULL(amd_pmu_branch_del, *x86_pmu.del);

static void amd_pmu_del_event(struct perf_event *event)
{
	if (needs_branch_stack(event))
		static_call(amd_pmu_branch_del)(event);
}

/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 * received during this window will be claimed. This prevents extending the
 * window past when it is possible that latent NMIs should be received. The
 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 * handled a counter. When an un-handled NMI is received, it will be claimed
 * only if arriving within that window.
 */
static inline int amd_pmu_adjust_nmi_window(int handled)
{
	/*
	 * If a counter was handled, record a timestamp such that un-handled
	 * NMIs will be claimed if arriving within that window.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);

		return handled;
	}

	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
		return NMI_DONE;

	return NMI_HANDLED;
}
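
/*
 * Example timeline (illustrative): an overflow is handled at jiffies == J,
 * so perf_nmi_tstamp becomes J + perf_nmi_window. A latent NMI arriving
 * before that timestamp is claimed as NMI_HANDLED; one arriving later gets
 * NMI_DONE and may be reported as an unknown NMI.
 */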
8657685665cSSandipan Das 
amd_pmu_handle_irq(struct pt_regs * regs)8666d3edaaeSLendacky, Thomas static int amd_pmu_handle_irq(struct pt_regs *regs)
8676d3edaaeSLendacky, Thomas {
868ada54345SStephane Eranian 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
8698f05c1ffSZheng Yongjun 	int handled;
870ada54345SStephane Eranian 	int pmu_enabled;
871ada54345SStephane Eranian 
872ada54345SStephane Eranian 	/*
873ada54345SStephane Eranian 	 * Save the PMU state.
874ada54345SStephane Eranian 	 * It needs to be restored when leaving the handler.
875ada54345SStephane Eranian 	 */
876ada54345SStephane Eranian 	pmu_enabled = cpuc->enabled;
877ada54345SStephane Eranian 	cpuc->enabled = 0;
878ada54345SStephane Eranian 
879baa014b9SRavi Bangoria 	amd_brs_disable_all();
880ada54345SStephane Eranian 
881ada54345SStephane Eranian 	/* Drain BRS is in use (could be inactive) */
882ada54345SStephane Eranian 	if (cpuc->lbr_users)
883ada54345SStephane Eranian 		amd_brs_drain();
8846d3edaaeSLendacky, Thomas 
8856d3edaaeSLendacky, Thomas 	/* Process any counter overflows */
8866d3edaaeSLendacky, Thomas 	handled = x86_pmu_handle_irq(regs);
8876d3edaaeSLendacky, Thomas 
888ada54345SStephane Eranian 	cpuc->enabled = pmu_enabled;
889ada54345SStephane Eranian 	if (pmu_enabled)
890baa014b9SRavi Bangoria 		amd_brs_enable_all();
891ada54345SStephane Eranian 
8927685665cSSandipan Das 	return amd_pmu_adjust_nmi_window(handled);
8936d3edaaeSLendacky, Thomas }
8946d3edaaeSLendacky, Thomas 
amd_pmu_v2_handle_irq(struct pt_regs * regs)8957685665cSSandipan Das static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
8967685665cSSandipan Das {
8977685665cSSandipan Das 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
8987685665cSSandipan Das 	struct perf_sample_data data;
8997685665cSSandipan Das 	struct hw_perf_event *hwc;
9007685665cSSandipan Das 	struct perf_event *event;
9017685665cSSandipan Das 	int handled = 0, idx;
902599522d9SBreno Leitao 	u64 reserved, status, mask;
9037685665cSSandipan Das 	bool pmu_enabled;
9046d3edaaeSLendacky, Thomas 
9057685665cSSandipan Das 	/*
9067685665cSSandipan Das 	 * Save the PMU state as it needs to be restored when leaving the
9077685665cSSandipan Das 	 * handler
9087685665cSSandipan Das 	 */
9097685665cSSandipan Das 	pmu_enabled = cpuc->enabled;
9107685665cSSandipan Das 	cpuc->enabled = 0;
9117685665cSSandipan Das 
912ca5b7c0dSSandipan Das 	/* Stop counting but do not disable LBR */
913ca5b7c0dSSandipan Das 	amd_pmu_core_disable_all();
9147685665cSSandipan Das 
9157685665cSSandipan Das 	status = amd_pmu_get_global_status();
9167685665cSSandipan Das 
9177685665cSSandipan Das 	/* Check if any overflows are pending */
9187685665cSSandipan Das 	if (!status)
9197685665cSSandipan Das 		goto done;
9207685665cSSandipan Das 
921*55ed6c47SSandipan Das 	/* Read branch records */
922*55ed6c47SSandipan Das 	if (x86_pmu.lbr_nr) {
923ca5b7c0dSSandipan Das 		amd_pmu_lbr_read();
924ca5b7c0dSSandipan Das 		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
925ca5b7c0dSSandipan Das 	}
926ca5b7c0dSSandipan Das 
927599522d9SBreno Leitao 	reserved = status & ~amd_pmu_global_cntr_mask;
928599522d9SBreno Leitao 	if (reserved)
929599522d9SBreno Leitao 		pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
930599522d9SBreno Leitao 			     reserved);
931599522d9SBreno Leitao 
932599522d9SBreno Leitao 	/* Clear any reserved bits set by buggy microcode */
933599522d9SBreno Leitao 	status &= amd_pmu_global_cntr_mask;
934599522d9SBreno Leitao 
9357685665cSSandipan Das 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
9367685665cSSandipan Das 		if (!test_bit(idx, cpuc->active_mask))
9377685665cSSandipan Das 			continue;
9387685665cSSandipan Das 
9397685665cSSandipan Das 		event = cpuc->events[idx];
9407685665cSSandipan Das 		hwc = &event->hw;
9417685665cSSandipan Das 		x86_perf_event_update(event);
9427685665cSSandipan Das 		mask = BIT_ULL(idx);
9437685665cSSandipan Das 
9447685665cSSandipan Das 		if (!(status & mask))
9457685665cSSandipan Das 			continue;
9467685665cSSandipan Das 
9477685665cSSandipan Das 		/* Event overflow */
9487685665cSSandipan Das 		handled++;
949263f5ecaSBreno Leitao 		status &= ~mask;
9507685665cSSandipan Das 		perf_sample_data_init(&data, 0, hwc->last_period);
9517685665cSSandipan Das 
9527685665cSSandipan Das 		if (!x86_perf_event_set_period(event))
9537685665cSSandipan Das 			continue;
9547685665cSSandipan Das 
955eb55b455SNamhyung Kim 		if (has_branch_stack(event))
956eb55b455SNamhyung Kim 			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
957ca5b7c0dSSandipan Das 
9587685665cSSandipan Das 		if (perf_event_overflow(event, &data, regs))
9597685665cSSandipan Das 			x86_pmu_stop(event, 0);
9607685665cSSandipan Das 	}
9617685665cSSandipan Das 
9627685665cSSandipan Das 	/*
9637685665cSSandipan Das 	 * It should never be the case that some overflows are not handled as
9647685665cSSandipan Das 	 * the corresponding PMCs are expected to be inactive according to the
9657685665cSSandipan Das 	 * active_mask
9667685665cSSandipan Das 	 */
9677685665cSSandipan Das 	WARN_ON(status > 0);
9687685665cSSandipan Das 
969ca5b7c0dSSandipan Das 	/* Clear overflow and freeze bits */
9707685665cSSandipan Das 	amd_pmu_ack_global_status(~status);
9717685665cSSandipan Das 
9727685665cSSandipan Das 	/*
9737685665cSSandipan Das 	 * Unmasking the LVTPC is not required as the Mask (M) bit of the LVT
9747685665cSSandipan Das 	 * PMI entry is not set by the local APIC when a PMC overflow occurs
9757685665cSSandipan Das 	 */
9767685665cSSandipan Das 	inc_irq_stat(apic_perf_irqs);
9777685665cSSandipan Das 
9787685665cSSandipan Das done:
9797685665cSSandipan Das 	cpuc->enabled = pmu_enabled;
9807685665cSSandipan Das 
9817685665cSSandipan Das 	/* Resume counting only if PMU is active */
9827685665cSSandipan Das 	if (pmu_enabled)
983ca5b7c0dSSandipan Das 		amd_pmu_core_enable_all();
9847685665cSSandipan Das 
9857685665cSSandipan Das 	return amd_pmu_adjust_nmi_window(handled);
9866d3edaaeSLendacky, Thomas }
9876d3edaaeSLendacky, Thomas 
98839b0332aSBorislav Petkov static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events * cpuc,int idx,struct perf_event * event)98939b0332aSBorislav Petkov amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
99039b0332aSBorislav Petkov 			  struct perf_event *event)
99139b0332aSBorislav Petkov {
99239b0332aSBorislav Petkov 	/*
99339b0332aSBorislav Petkov 	 * if not NB event or no NB, then no constraints
99439b0332aSBorislav Petkov 	 */
99539b0332aSBorislav Petkov 	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
99639b0332aSBorislav Petkov 		return &unconstrained;
99739b0332aSBorislav Petkov 
99839b0332aSBorislav Petkov 	return __amd_get_nb_event_constraints(cpuc, event, NULL);
99939b0332aSBorislav Petkov }
100039b0332aSBorislav Petkov 
amd_put_event_constraints(struct cpu_hw_events * cpuc,struct perf_event * event)100139b0332aSBorislav Petkov static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
100239b0332aSBorislav Petkov 				      struct perf_event *event)
100339b0332aSBorislav Petkov {
100439b0332aSBorislav Petkov 	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
100539b0332aSBorislav Petkov 		__amd_put_nb_event_constraints(cpuc, event);
100639b0332aSBorislav Petkov }
100739b0332aSBorislav Petkov 
100839b0332aSBorislav Petkov PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
100939b0332aSBorislav Petkov PMU_FORMAT_ATTR(umask,	"config:8-15"	);
101039b0332aSBorislav Petkov PMU_FORMAT_ATTR(edge,	"config:18"	);
101139b0332aSBorislav Petkov PMU_FORMAT_ATTR(inv,	"config:23"	);
101239b0332aSBorislav Petkov PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
101339b0332aSBorislav Petkov 
101439b0332aSBorislav Petkov static struct attribute *amd_format_attr[] = {
101539b0332aSBorislav Petkov 	&format_attr_event.attr,
101639b0332aSBorislav Petkov 	&format_attr_umask.attr,
101739b0332aSBorislav Petkov 	&format_attr_edge.attr,
101839b0332aSBorislav Petkov 	&format_attr_inv.attr,
101939b0332aSBorislav Petkov 	&format_attr_cmask.attr,
102039b0332aSBorislav Petkov 	NULL,
102139b0332aSBorislav Petkov };
102239b0332aSBorislav Petkov 
102339b0332aSBorislav Petkov /* AMD Family 15h */
102439b0332aSBorislav Petkov 
102539b0332aSBorislav Petkov #define AMD_EVENT_TYPE_MASK	0x000000F0ULL
102639b0332aSBorislav Petkov 
102739b0332aSBorislav Petkov #define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
102839b0332aSBorislav Petkov #define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
102939b0332aSBorislav Petkov #define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
103039b0332aSBorislav Petkov #define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
103139b0332aSBorislav Petkov #define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
103239b0332aSBorislav Petkov #define AMD_EVENT_EX_LS		0x000000C0ULL
103339b0332aSBorislav Petkov #define AMD_EVENT_DE		0x000000D0ULL
103439b0332aSBorislav Petkov #define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
103539b0332aSBorislav Petkov 
103639b0332aSBorislav Petkov /*
103739b0332aSBorislav Petkov  * AMD family 15h event code/PMC mappings:
103839b0332aSBorislav Petkov  *
103939b0332aSBorislav Petkov  * type = event_code & 0x0F0:
104039b0332aSBorislav Petkov  *
104139b0332aSBorislav Petkov  * 0x000	FP	PERF_CTL[5:3]
104239b0332aSBorislav Petkov  * 0x010	FP	PERF_CTL[5:3]
104339b0332aSBorislav Petkov  * 0x020	LS	PERF_CTL[5:0]
104439b0332aSBorislav Petkov  * 0x030	LS	PERF_CTL[5:0]
104539b0332aSBorislav Petkov  * 0x040	DC	PERF_CTL[5:0]
104639b0332aSBorislav Petkov  * 0x050	DC	PERF_CTL[5:0]
104739b0332aSBorislav Petkov  * 0x060	CU	PERF_CTL[2:0]
104839b0332aSBorislav Petkov  * 0x070	CU	PERF_CTL[2:0]
104939b0332aSBorislav Petkov  * 0x080	IC/DE	PERF_CTL[2:0]
105039b0332aSBorislav Petkov  * 0x090	IC/DE	PERF_CTL[2:0]
105139b0332aSBorislav Petkov  * 0x0A0	---
105239b0332aSBorislav Petkov  * 0x0B0	---
105339b0332aSBorislav Petkov  * 0x0C0	EX/LS	PERF_CTL[5:0]
105439b0332aSBorislav Petkov  * 0x0D0	DE	PERF_CTL[2:0]
105539b0332aSBorislav Petkov  * 0x0E0	NB	NB_PERF_CTL[3:0]
105639b0332aSBorislav Petkov  * 0x0F0	NB	NB_PERF_CTL[3:0]
105739b0332aSBorislav Petkov  *
105839b0332aSBorislav Petkov  * Exceptions:
105939b0332aSBorislav Petkov  *
106039b0332aSBorislav Petkov  * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
106139b0332aSBorislav Petkov  * 0x003	FP	PERF_CTL[3]
106239b0332aSBorislav Petkov  * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
106339b0332aSBorislav Petkov  * 0x00B	FP	PERF_CTL[3]
106439b0332aSBorislav Petkov  * 0x00D	FP	PERF_CTL[3]
106539b0332aSBorislav Petkov  * 0x023	DE	PERF_CTL[2:0]
106639b0332aSBorislav Petkov  * 0x02D	LS	PERF_CTL[3]
106739b0332aSBorislav Petkov  * 0x02E	LS	PERF_CTL[3,0]
106839b0332aSBorislav Petkov  * 0x031	LS	PERF_CTL[2:0] (**)
106939b0332aSBorislav Petkov  * 0x043	CU	PERF_CTL[2:0]
107039b0332aSBorislav Petkov  * 0x045	CU	PERF_CTL[2:0]
107139b0332aSBorislav Petkov  * 0x046	CU	PERF_CTL[2:0]
107239b0332aSBorislav Petkov  * 0x054	CU	PERF_CTL[2:0]
107339b0332aSBorislav Petkov  * 0x055	CU	PERF_CTL[2:0]
107439b0332aSBorislav Petkov  * 0x08F	IC	PERF_CTL[0]
107539b0332aSBorislav Petkov  * 0x187	DE	PERF_CTL[0]
107639b0332aSBorislav Petkov  * 0x188	DE	PERF_CTL[0]
107739b0332aSBorislav Petkov  * 0x0DB	EX	PERF_CTL[5:0]
107839b0332aSBorislav Petkov  * 0x0DC	LS	PERF_CTL[5:0]
107939b0332aSBorislav Petkov  * 0x0DD	LS	PERF_CTL[5:0]
108039b0332aSBorislav Petkov  * 0x0DE	LS	PERF_CTL[5:0]
108139b0332aSBorislav Petkov  * 0x0DF	LS	PERF_CTL[5:0]
108239b0332aSBorislav Petkov  * 0x1C0	EX	PERF_CTL[5:3]
108339b0332aSBorislav Petkov  * 0x1D6	EX	PERF_CTL[5:0]
108439b0332aSBorislav Petkov  * 0x1D8	EX	PERF_CTL[5:0]
108539b0332aSBorislav Petkov  *
108639b0332aSBorislav Petkov  * (*)  depending on the umask all FPU counters may be used
108739b0332aSBorislav Petkov  * (**) only one unitmask enabled at a time
108839b0332aSBorislav Petkov  */
108939b0332aSBorislav Petkov 
109039b0332aSBorislav Petkov static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
109139b0332aSBorislav Petkov static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
109239b0332aSBorislav Petkov static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
109339b0332aSBorislav Petkov static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
109439b0332aSBorislav Petkov static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
109539b0332aSBorislav Petkov static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
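/*
 * Editor's note (illustrative): the counter bitmasks above encode the
 * PERF_CTL ranges from the table, e.g. amd_f15_PMC50 (0x3F) is
 * PERF_CTL[5:0], amd_f15_PMC53 (0x38) is PERF_CTL[5:3], amd_f15_PMC20
 * (0x07) is PERF_CTL[2:0], and the overlapping amd_f15_PMC30 (0x09) is
 * PERF_CTL[3,0] as used by event 0x02E.
 */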
109639b0332aSBorislav Petkov 
109739b0332aSBorislav Petkov static struct event_constraint *
109839b0332aSBorislav Petkov amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
109939b0332aSBorislav Petkov 			       struct perf_event *event)
110039b0332aSBorislav Petkov {
110139b0332aSBorislav Petkov 	struct hw_perf_event *hwc = &event->hw;
110239b0332aSBorislav Petkov 	unsigned int event_code = amd_get_event_code(hwc);
110339b0332aSBorislav Petkov 
110439b0332aSBorislav Petkov 	switch (event_code & AMD_EVENT_TYPE_MASK) {
110539b0332aSBorislav Petkov 	case AMD_EVENT_FP:
110639b0332aSBorislav Petkov 		switch (event_code) {
110739b0332aSBorislav Petkov 		case 0x000:
110839b0332aSBorislav Petkov 			if (!(hwc->config & 0x0000F000ULL))
110939b0332aSBorislav Petkov 				break;
111039b0332aSBorislav Petkov 			if (!(hwc->config & 0x00000F00ULL))
111139b0332aSBorislav Petkov 				break;
111239b0332aSBorislav Petkov 			return &amd_f15_PMC3;
111339b0332aSBorislav Petkov 		case 0x004:
111439b0332aSBorislav Petkov 			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
111539b0332aSBorislav Petkov 				break;
111639b0332aSBorislav Petkov 			return &amd_f15_PMC3;
111739b0332aSBorislav Petkov 		case 0x003:
111839b0332aSBorislav Petkov 		case 0x00B:
111939b0332aSBorislav Petkov 		case 0x00D:
112039b0332aSBorislav Petkov 			return &amd_f15_PMC3;
112139b0332aSBorislav Petkov 		}
112239b0332aSBorislav Petkov 		return &amd_f15_PMC53;
112339b0332aSBorislav Petkov 	case AMD_EVENT_LS:
112439b0332aSBorislav Petkov 	case AMD_EVENT_DC:
112539b0332aSBorislav Petkov 	case AMD_EVENT_EX_LS:
112639b0332aSBorislav Petkov 		switch (event_code) {
112739b0332aSBorislav Petkov 		case 0x023:
112839b0332aSBorislav Petkov 		case 0x043:
112939b0332aSBorislav Petkov 		case 0x045:
113039b0332aSBorislav Petkov 		case 0x046:
113139b0332aSBorislav Petkov 		case 0x054:
113239b0332aSBorislav Petkov 		case 0x055:
113339b0332aSBorislav Petkov 			return &amd_f15_PMC20;
113439b0332aSBorislav Petkov 		case 0x02D:
113539b0332aSBorislav Petkov 			return &amd_f15_PMC3;
113639b0332aSBorislav Petkov 		case 0x02E:
113739b0332aSBorislav Petkov 			return &amd_f15_PMC30;
113839b0332aSBorislav Petkov 		case 0x031:
113939b0332aSBorislav Petkov 			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
114039b0332aSBorislav Petkov 				return &amd_f15_PMC20;
114139b0332aSBorislav Petkov 			return &emptyconstraint;
114239b0332aSBorislav Petkov 		case 0x1C0:
114339b0332aSBorislav Petkov 			return &amd_f15_PMC53;
114439b0332aSBorislav Petkov 		default:
114539b0332aSBorislav Petkov 			return &amd_f15_PMC50;
114639b0332aSBorislav Petkov 		}
114739b0332aSBorislav Petkov 	case AMD_EVENT_CU:
114839b0332aSBorislav Petkov 	case AMD_EVENT_IC_DE:
114939b0332aSBorislav Petkov 	case AMD_EVENT_DE:
115039b0332aSBorislav Petkov 		switch (event_code) {
115139b0332aSBorislav Petkov 		case 0x08F:
115239b0332aSBorislav Petkov 		case 0x187:
115339b0332aSBorislav Petkov 		case 0x188:
115439b0332aSBorislav Petkov 			return &amd_f15_PMC0;
115539b0332aSBorislav Petkov 		case 0x0DB ... 0x0DF:
115639b0332aSBorislav Petkov 		case 0x1D6:
115739b0332aSBorislav Petkov 		case 0x1D8:
115839b0332aSBorislav Petkov 			return &amd_f15_PMC50;
115939b0332aSBorislav Petkov 		default:
116039b0332aSBorislav Petkov 			return &amd_f15_PMC20;
116139b0332aSBorislav Petkov 		}
116239b0332aSBorislav Petkov 	case AMD_EVENT_NB:
1163940b2f2fSBorislav Petkov 		/* moved to uncore.c */
116439b0332aSBorislav Petkov 		return &emptyconstraint;
116539b0332aSBorislav Petkov 	default:
116639b0332aSBorislav Petkov 		return &emptyconstraint;
116739b0332aSBorislav Petkov 	}
116839b0332aSBorislav Petkov }
116939b0332aSBorislav Petkov 
1170471af006SKim Phillips static struct event_constraint pair_constraint;
1171471af006SKim Phillips 
1172471af006SKim Phillips static struct event_constraint *
1173471af006SKim Phillips amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
1174471af006SKim Phillips 			       struct perf_event *event)
1175471af006SKim Phillips {
1176471af006SKim Phillips 	struct hw_perf_event *hwc = &event->hw;
1177471af006SKim Phillips 
1178471af006SKim Phillips 	if (amd_is_pair_event_code(hwc))
1179471af006SKim Phillips 		return &pair_constraint;
1180471af006SKim Phillips 
1181471af006SKim Phillips 	return &unconstrained;
1182471af006SKim Phillips }
1183471af006SKim Phillips 
118457388912SKim Phillips static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
118557388912SKim Phillips 					   struct perf_event *event)
118657388912SKim Phillips {
118757388912SKim Phillips 	struct hw_perf_event *hwc = &event->hw;
118857388912SKim Phillips 
118957388912SKim Phillips 	if (is_counter_pair(hwc))
119057388912SKim Phillips 		--cpuc->n_pair;
119157388912SKim Phillips }
119257388912SKim Phillips 
1193ada54345SStephane Eranian /*
1194ada54345SStephane Eranian  * Because of the way BRS operates with inactive and active phases, and
1195ada54345SStephane Eranian  * the link to one counter, it is not possible to have two events using BRS
1196ada54345SStephane Eranian  * scheduled at the same time. There would be an issue with enforcing the
1197ada54345SStephane Eranian  * period of each one and given that the BRS saturates, it would not be possible
1198ada54345SStephane Eranian  * to guarantee correlated content for all events. Therefore, in situations
1199ada54345SStephane Eranian  * where multiple events want to use BRS, the kernel enforces mutual exclusion.
1200ada54345SStephane Eranian  * Exclusion is enforced by choosing only one counter for events using BRS.
1201ada54345SStephane Eranian  * The event scheduling logic will then automatically multiplex the
1202ada54345SStephane Eranian  * events and ensure that at most one event is actively using BRS.
1203ada54345SStephane Eranian  *
1204ada54345SStephane Eranian  * The BRS counter could be any counter, but there is no constraint on Fam19h,
1205ada54345SStephane Eranian  * therefore all counters are equal and thus we pick the first one: PMC0
1206ada54345SStephane Eranian  */
1207ada54345SStephane Eranian static struct event_constraint amd_fam19h_brs_cntr0_constraint =
1208ada54345SStephane Eranian 	EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK);
1209ada54345SStephane Eranian 
1210ada54345SStephane Eranian static struct event_constraint amd_fam19h_brs_pair_cntr0_constraint =
1211ada54345SStephane Eranian 	__EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK, 1, 0, PERF_X86_EVENT_PAIR);
1212ada54345SStephane Eranian 
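/*
 * Editor's note (illustrative): both constraints above use counter mask
 * 0x1, making PMC0 the only eligible counter, which is how the mutual
 * exclusion described above is enforced. The paired variant additionally
 * carries PERF_X86_EVENT_PAIR so the adjacent odd counter (PMC1) is
 * reserved for the other half of the pair.
 */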
1213ada54345SStephane Eranian static struct event_constraint *
1214ada54345SStephane Eranian amd_get_event_constraints_f19h(struct cpu_hw_events *cpuc, int idx,
1215ada54345SStephane Eranian 			  struct perf_event *event)
1216ada54345SStephane Eranian {
1217ada54345SStephane Eranian 	struct hw_perf_event *hwc = &event->hw;
1218ada54345SStephane Eranian 	bool has_brs = has_amd_brs(hwc);
1219ada54345SStephane Eranian 
1220ada54345SStephane Eranian 	/*
1221ada54345SStephane Eranian 	 * If BRS is used with an event that requires a counter pair, the
1222ada54345SStephane Eranian 	 * kernel allows it, but only on counters 0 & 1, to enforce the
1223ada54345SStephane Eranian 	 * multiplexing needed to protect BRS when there are multiple
1224ada54345SStephane Eranian 	 * BRS users.
1225ada54345SStephane Eranian 	 */
1226ada54345SStephane Eranian 	if (amd_is_pair_event_code(hwc)) {
1227ada54345SStephane Eranian 		return has_brs ? &amd_fam19h_brs_pair_cntr0_constraint
1228ada54345SStephane Eranian 			       : &pair_constraint;
1229ada54345SStephane Eranian 	}
1230ada54345SStephane Eranian 
1231ada54345SStephane Eranian 	if (has_brs)
1232ada54345SStephane Eranian 		return &amd_fam19h_brs_cntr0_constraint;
1233ada54345SStephane Eranian 
1234ada54345SStephane Eranian 	return &unconstrained;
1235ada54345SStephane Eranian }
1236ada54345SStephane Eranian 
1237ada54345SStephane Eranian 
123839b0332aSBorislav Petkov static ssize_t amd_event_sysfs_show(char *page, u64 config)
123939b0332aSBorislav Petkov {
124039b0332aSBorislav Petkov 	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
124139b0332aSBorislav Petkov 		    (config & AMD64_EVENTSEL_EVENT) >> 24;
124239b0332aSBorislav Petkov 
124339b0332aSBorislav Petkov 	return x86_event_sysfs_show(page, config, event);
124439b0332aSBorislav Petkov }
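/*
 * Editor's worked example (illustrative, assuming the usual definition of
 * AMD64_EVENTSEL_EVENT covering bits 7:0 and 35:32): for
 * config = (1ULL << 32) | 0xd6, the low byte contributes 0xd6 and bits
 * 35:32 shifted right by 24 contribute 0x100, so sysfs shows event=0x1d6.
 */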
124539b0332aSBorislav Petkov 
124628f0f3c4SPeter Zijlstra static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
12473c27b0c6SPeter Zijlstra {
12483c27b0c6SPeter Zijlstra 	/*
12493c27b0c6SPeter Zijlstra 	 * Decrease period by the depth of the BRS feature to get the last N
12503c27b0c6SPeter Zijlstra 	 * taken branches and approximate the desired period
12513c27b0c6SPeter Zijlstra 	 */
125228f0f3c4SPeter Zijlstra 	if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
125328f0f3c4SPeter Zijlstra 		*left -= x86_pmu.lbr_nr;
12543c27b0c6SPeter Zijlstra }
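/*
 * Editor's worked example (illustrative): assuming a BRS depth of
 * x86_pmu.lbr_nr == 16 and a requested sample period of 100000, *left
 * becomes 99984, so the counter overflows ~16 taken branches early and
 * the branch record fills up close to the originally requested period.
 */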
12553c27b0c6SPeter Zijlstra 
125639b0332aSBorislav Petkov static __initconst const struct x86_pmu amd_pmu = {
125739b0332aSBorislav Petkov 	.name			= "AMD",
12586d3edaaeSLendacky, Thomas 	.handle_irq		= amd_pmu_handle_irq,
1259914123faSLendacky, Thomas 	.disable_all		= amd_pmu_disable_all,
1260ada54345SStephane Eranian 	.enable_all		= amd_pmu_enable_all,
1261ada54345SStephane Eranian 	.enable			= amd_pmu_enable_event,
12623966c3feSLendacky, Thomas 	.disable		= amd_pmu_disable_event,
126339b0332aSBorislav Petkov 	.hw_config		= amd_pmu_hw_config,
126439b0332aSBorislav Petkov 	.schedule_events	= x86_schedule_events,
126539b0332aSBorislav Petkov 	.eventsel		= MSR_K7_EVNTSEL0,
126639b0332aSBorislav Petkov 	.perfctr		= MSR_K7_PERFCTR0,
126739b0332aSBorislav Petkov 	.addr_offset            = amd_pmu_addr_offset,
126839b0332aSBorislav Petkov 	.event_map		= amd_pmu_event_map,
126939b0332aSBorislav Petkov 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
127039b0332aSBorislav Petkov 	.num_counters		= AMD64_NUM_COUNTERS,
1271ada54345SStephane Eranian 	.add			= amd_pmu_add_event,
1272ada54345SStephane Eranian 	.del			= amd_pmu_del_event,
127339b0332aSBorislav Petkov 	.cntval_bits		= 48,
127439b0332aSBorislav Petkov 	.cntval_mask		= (1ULL << 48) - 1,
127539b0332aSBorislav Petkov 	.apic			= 1,
127639b0332aSBorislav Petkov 	/* use highest bit to detect overflow */
127739b0332aSBorislav Petkov 	.max_period		= (1ULL << 47) - 1,
127839b0332aSBorislav Petkov 	.get_event_constraints	= amd_get_event_constraints,
127939b0332aSBorislav Petkov 	.put_event_constraints	= amd_put_event_constraints,
128039b0332aSBorislav Petkov 
128139b0332aSBorislav Petkov 	.format_attrs		= amd_format_attr,
128239b0332aSBorislav Petkov 	.events_sysfs_show	= amd_event_sysfs_show,
128339b0332aSBorislav Petkov 
128439b0332aSBorislav Petkov 	.cpu_prepare		= amd_pmu_cpu_prepare,
128539b0332aSBorislav Petkov 	.cpu_starting		= amd_pmu_cpu_starting,
128639b0332aSBorislav Petkov 	.cpu_dead		= amd_pmu_cpu_dead,
128732b62f44SPeter Zijlstra 
128832b62f44SPeter Zijlstra 	.amd_nb_constraints	= 1,
128939b0332aSBorislav Petkov };
129039b0332aSBorislav Petkov 
1291ada54345SStephane Eranian static ssize_t branches_show(struct device *cdev,
1292ada54345SStephane Eranian 			      struct device_attribute *attr,
1293ada54345SStephane Eranian 			      char *buf)
1294ada54345SStephane Eranian {
1295ada54345SStephane Eranian 	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
1296ada54345SStephane Eranian }
1297ada54345SStephane Eranian 
1298ada54345SStephane Eranian static DEVICE_ATTR_RO(branches);
1299ada54345SStephane Eranian 
13009603aa79SSandipan Das static struct attribute *amd_pmu_branches_attrs[] = {
1301ada54345SStephane Eranian 	&dev_attr_branches.attr,
1302ada54345SStephane Eranian 	NULL,
1303ada54345SStephane Eranian };
1304ada54345SStephane Eranian 
1305ada54345SStephane Eranian static umode_t
13069603aa79SSandipan Das amd_branches_is_visible(struct kobject *kobj, struct attribute *attr, int i)
1307ada54345SStephane Eranian {
1308ada54345SStephane Eranian 	return x86_pmu.lbr_nr ? attr->mode : 0;
1309ada54345SStephane Eranian }
1310ada54345SStephane Eranian 
13119603aa79SSandipan Das static struct attribute_group group_caps_amd_branches = {
1312ada54345SStephane Eranian 	.name  = "caps",
13139603aa79SSandipan Das 	.attrs = amd_pmu_branches_attrs,
13149603aa79SSandipan Das 	.is_visible = amd_branches_is_visible,
1315ada54345SStephane Eranian };
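/*
 * Editor's note (assumed sysfs path, illustrative): with the core PMU
 * registered as "cpu", the attribute above typically appears as
 * /sys/bus/event_source/devices/cpu/caps/branches; when x86_pmu.lbr_nr
 * is zero, amd_branches_is_visible() returns 0 and the file is omitted.
 */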
1316ada54345SStephane Eranian 
13179603aa79SSandipan Das #ifdef CONFIG_PERF_EVENTS_AMD_BRS
13189603aa79SSandipan Das 
131944175993SStephane Eranian EVENT_ATTR_STR(branch-brs, amd_branch_brs,
132044175993SStephane Eranian 	       "event=" __stringify(AMD_FAM19H_BRS_EVENT)"\n");
132144175993SStephane Eranian 
132244175993SStephane Eranian static struct attribute *amd_brs_events_attrs[] = {
132344175993SStephane Eranian 	EVENT_PTR(amd_branch_brs),
132444175993SStephane Eranian 	NULL,
132544175993SStephane Eranian };
132644175993SStephane Eranian 
13279603aa79SSandipan Das static umode_t
13289603aa79SSandipan Das amd_brs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
13299603aa79SSandipan Das {
13309603aa79SSandipan Das 	return static_cpu_has(X86_FEATURE_BRS) && x86_pmu.lbr_nr ?
13319603aa79SSandipan Das 	       attr->mode : 0;
13329603aa79SSandipan Das }
13339603aa79SSandipan Das 
133444175993SStephane Eranian static struct attribute_group group_events_amd_brs = {
133544175993SStephane Eranian 	.name       = "events",
133644175993SStephane Eranian 	.attrs      = amd_brs_events_attrs,
133744175993SStephane Eranian 	.is_visible = amd_brs_is_visible,
133844175993SStephane Eranian };
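/*
 * Editor's usage note (assumed tool syntax, illustrative): the event
 * above can be sampled with something like
 * "perf record -b -e cpu/branch-brs/ -c 100007 -- workload" on a
 * BRS-capable Fam19h part.
 */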
133944175993SStephane Eranian 
13409603aa79SSandipan Das #endif	/* CONFIG_PERF_EVENTS_AMD_BRS */
13419603aa79SSandipan Das 
1342ada54345SStephane Eranian static const struct attribute_group *amd_attr_update[] = {
13439603aa79SSandipan Das 	&group_caps_amd_branches,
13449603aa79SSandipan Das #ifdef CONFIG_PERF_EVENTS_AMD_BRS
134544175993SStephane Eranian 	&group_events_amd_brs,
13469603aa79SSandipan Das #endif
1347ada54345SStephane Eranian 	NULL,
1348ada54345SStephane Eranian };
1349ada54345SStephane Eranian 
135039b0332aSBorislav Petkov static int __init amd_core_pmu_init(void)
135139b0332aSBorislav Petkov {
135256e026a7SSandipan Das 	union cpuid_0x80000022_ebx ebx;
1353471af006SKim Phillips 	u64 even_ctr_mask = 0ULL;
1354471af006SKim Phillips 	int i;
1355471af006SKim Phillips 
135639b0332aSBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
135739b0332aSBorislav Petkov 		return 0;
135839b0332aSBorislav Petkov 
1359471af006SKim Phillips 	/* Avoid calculating the value each time in the NMI handler */
1360df4d2973STom Lendacky 	perf_nmi_window = msecs_to_jiffies(100);
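	/*
	 * Editor's note (illustrative): the NMI handler records a per-CPU
	 * timestamp on each handled PMC overflow and claims otherwise
	 * unhandled NMIs that arrive within this ~100ms window, working
	 * around late/latched NMI delivery.
	 */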
1361df4d2973STom Lendacky 
136239b0332aSBorislav Petkov 	/*
136339b0332aSBorislav Petkov 	 * If core performance counter extensions exist, we must use
136439b0332aSBorislav Petkov 	 * the MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR MSRs. See also
136539b0332aSBorislav Petkov 	 * amd_pmu_addr_offset().
136639b0332aSBorislav Petkov 	 */
136739b0332aSBorislav Petkov 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
136839b0332aSBorislav Petkov 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
136939b0332aSBorislav Petkov 	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
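	/*
	 * Editor's note (illustrative, assuming the usual Fam15h+ layout):
	 * the core-extension MSRs interleave control and counter registers
	 * (e.g. PERF_CTL2 at MSR_F15H_PERF_CTL + 4, PERF_CTR2 at
	 * MSR_F15H_PERF_CTR + 4), which is why amd_pmu_addr_offset()
	 * doubles the counter index for these.
	 */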
137021d59e3eSSandipan Das 
137121d59e3eSSandipan Das 	/* Check for Performance Monitoring v2 support */
137221d59e3eSSandipan Das 	if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
137356e026a7SSandipan Das 		ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
137456e026a7SSandipan Das 
137521d59e3eSSandipan Das 		/* Update PMU version for later usage */
137621d59e3eSSandipan Das 		x86_pmu.version = 2;
137721d59e3eSSandipan Das 
137856e026a7SSandipan Das 		/* Find the number of available Core PMCs */
137956e026a7SSandipan Das 		x86_pmu.num_counters = ebx.split.num_core_pmc;
138056e026a7SSandipan Das 
138121d59e3eSSandipan Das 		amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
13829622e67eSSandipan Das 
13839622e67eSSandipan Das 		/* Update PMC handling functions */
13849622e67eSSandipan Das 		x86_pmu.enable_all = amd_pmu_v2_enable_all;
13859622e67eSSandipan Das 		x86_pmu.disable_all = amd_pmu_v2_disable_all;
13869622e67eSSandipan Das 		x86_pmu.enable = amd_pmu_v2_enable_event;
13877685665cSSandipan Das 		x86_pmu.handle_irq = amd_pmu_v2_handle_irq;
13887685665cSSandipan Das 		static_call_update(amd_pmu_test_overflow, amd_pmu_test_overflow_status);
138921d59e3eSSandipan Das 	}
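	/*
	 * Editor's worked example (illustrative): with num_core_pmc == 6,
	 * amd_pmu_global_cntr_mask becomes 0x3f, matching the per-counter
	 * enable and overflow bits in PerfCntrGlobalCtl/PerfCntrGlobalStatus.
	 */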
139021d59e3eSSandipan Das 
139132b62f44SPeter Zijlstra 	/*
139232b62f44SPeter Zijlstra 	 * AMD Core perfctr has separate MSRs for the NB events, see
139332b62f44SPeter Zijlstra 	 * the amd/uncore.c driver.
139432b62f44SPeter Zijlstra 	 */
139532b62f44SPeter Zijlstra 	x86_pmu.amd_nb_constraints = 0;
139639b0332aSBorislav Petkov 
1397471af006SKim Phillips 	if (boot_cpu_data.x86 == 0x15) {
1398471af006SKim Phillips 		pr_cont("Fam15h ");
1399471af006SKim Phillips 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
1400471af006SKim Phillips 	}
1401471af006SKim Phillips 	if (boot_cpu_data.x86 >= 0x17) {
1402471af006SKim Phillips 		pr_cont("Fam17h+ ");
1403471af006SKim Phillips 		/*
1404471af006SKim Phillips 		 * Family 17h and compatibles have constraints for Large
1405471af006SKim Phillips 		 * Increment per Cycle events: they may only be assigned an
1406471af006SKim Phillips 		 * even numbered counter that has a consecutive adjacent odd
1407471af006SKim Phillips 		 * numbered counter following it.
1408471af006SKim Phillips 		 */
1409471af006SKim Phillips 		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
141008245672SColin Ian King 			even_ctr_mask |= BIT_ULL(i);
1411471af006SKim Phillips 
1412471af006SKim Phillips 		pair_constraint = (struct event_constraint)
1413471af006SKim Phillips 				    __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
1414471af006SKim Phillips 				    x86_pmu.num_counters / 2, 0,
1415471af006SKim Phillips 				    PERF_X86_EVENT_PAIR);
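		/*
		 * Editor's worked example (illustrative): with 6 core
		 * counters the loop above yields even_ctr_mask == 0x15
		 * (PMC0/2/4), so a Large Increment per Cycle event is
		 * scheduled on an even counter while the adjacent odd one
		 * is reserved; at most num_counters / 2 == 3 such events
		 * can be active.
		 */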
1416471af006SKim Phillips 
1417471af006SKim Phillips 		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
141857388912SKim Phillips 		x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
141957388912SKim Phillips 		x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
1420471af006SKim Phillips 		x86_pmu.flags |= PMU_FL_PAIR;
1421471af006SKim Phillips 	}
1422471af006SKim Phillips 
1423703fb765SSandipan Das 	/* LBR and BRS are mutually exclusive features */
1424ca5b7c0dSSandipan Das 	if (!amd_pmu_lbr_init()) {
1425ca5b7c0dSSandipan Das 		/* LBR requires flushing on context switch */
1426ca5b7c0dSSandipan Das 		x86_pmu.sched_task = amd_pmu_lbr_sched_task;
1427ca5b7c0dSSandipan Das 		static_call_update(amd_pmu_branch_hw_config, amd_pmu_lbr_hw_config);
1428ca5b7c0dSSandipan Das 		static_call_update(amd_pmu_branch_reset, amd_pmu_lbr_reset);
1429ca5b7c0dSSandipan Das 		static_call_update(amd_pmu_branch_add, amd_pmu_lbr_add);
1430ca5b7c0dSSandipan Das 		static_call_update(amd_pmu_branch_del, amd_pmu_lbr_del);
1431ca5b7c0dSSandipan Das 	} else if (!amd_brs_init()) {
1432ada54345SStephane Eranian 		/*
1433ada54345SStephane Eranian 		 * BRS requires special event constraints and flushing on ctxsw.
1434ada54345SStephane Eranian 		 */
1435ada54345SStephane Eranian 		x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
1436706460a9SSandipan Das 		x86_pmu.sched_task = amd_pmu_brs_sched_task;
14373c27b0c6SPeter Zijlstra 		x86_pmu.limit_period = amd_pmu_limit_period;
1438706460a9SSandipan Das 
1439706460a9SSandipan Das 		static_call_update(amd_pmu_branch_hw_config, amd_brs_hw_config);
1440706460a9SSandipan Das 		static_call_update(amd_pmu_branch_reset, amd_brs_reset);
1441706460a9SSandipan Das 		static_call_update(amd_pmu_branch_add, amd_pmu_brs_add);
1442706460a9SSandipan Das 		static_call_update(amd_pmu_branch_del, amd_pmu_brs_del);
1443706460a9SSandipan Das 
1444ada54345SStephane Eranian 		/*
1445ada54345SStephane Eranian 		 * The put_event_constraints callback is the same as on Fam17h, set above.
1446ada54345SStephane Eranian 		 */
1447d5616bacSStephane Eranian 
1448d5616bacSStephane Eranian 		/* branch sampling must be stopped when entering low power */
1449d5616bacSStephane Eranian 		amd_brs_lopwr_init();
1450ada54345SStephane Eranian 	}
1451ada54345SStephane Eranian 
1452ada54345SStephane Eranian 	x86_pmu.attr_update = amd_attr_update;
1453ada54345SStephane Eranian 
145439b0332aSBorislav Petkov 	pr_cont("core perfctr, ");
145539b0332aSBorislav Petkov 	return 0;
145639b0332aSBorislav Petkov }
145739b0332aSBorislav Petkov 
145839b0332aSBorislav Petkov __init int amd_pmu_init(void)
145939b0332aSBorislav Petkov {
146039b0332aSBorislav Petkov 	int ret;
146139b0332aSBorislav Petkov 
146239b0332aSBorislav Petkov 	/* Performance-monitoring supported from K7 and later: */
146339b0332aSBorislav Petkov 	if (boot_cpu_data.x86 < 6)
146439b0332aSBorislav Petkov 		return -ENODEV;
146539b0332aSBorislav Petkov 
146639b0332aSBorislav Petkov 	x86_pmu = amd_pmu;
146739b0332aSBorislav Petkov 
146839b0332aSBorislav Petkov 	ret = amd_core_pmu_init();
146939b0332aSBorislav Petkov 	if (ret)
147039b0332aSBorislav Petkov 		return ret;
147139b0332aSBorislav Petkov 
147232b62f44SPeter Zijlstra 	if (num_possible_cpus() == 1) {
147332b62f44SPeter Zijlstra 		/*
147432b62f44SPeter Zijlstra 		 * No point in allocating data structures to serialize
147532b62f44SPeter Zijlstra 		 * against other CPUs when there is only one CPU.
147632b62f44SPeter Zijlstra 		 */
147732b62f44SPeter Zijlstra 		x86_pmu.amd_nb_constraints = 0;
147832b62f44SPeter Zijlstra 	}
147932b62f44SPeter Zijlstra 
14800e3b74e2SKim Phillips 	if (boot_cpu_data.x86 >= 0x17)
14810e3b74e2SKim Phillips 		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
14820e3b74e2SKim Phillips 	else
14830e3b74e2SKim Phillips 		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
148439b0332aSBorislav Petkov 
148539b0332aSBorislav Petkov 	return 0;
148639b0332aSBorislav Petkov }
148739b0332aSBorislav Petkov 
1488bae19fddSSandipan Das static inline void amd_pmu_reload_virt(void)
1489bae19fddSSandipan Das {
1490bae19fddSSandipan Das 	if (x86_pmu.version >= 2) {
1491bae19fddSSandipan Das 		/*
1492bae19fddSSandipan Das 		 * Clear global enable bits, reprogram the PERF_CTL
1493bae19fddSSandipan Das 		 * registers with updated perf_ctr_virt_mask and then
1494bae19fddSSandipan Das 		 * set global enable bits once again
1495bae19fddSSandipan Das 		 */
1496bae19fddSSandipan Das 		amd_pmu_v2_disable_all();
1497bae19fddSSandipan Das 		amd_pmu_enable_all(0);
1498bae19fddSSandipan Das 		amd_pmu_v2_enable_all(0);
1499bae19fddSSandipan Das 		return;
1500bae19fddSSandipan Das 	}
1501bae19fddSSandipan Das 
1502bae19fddSSandipan Das 	amd_pmu_disable_all();
1503bae19fddSSandipan Das 	amd_pmu_enable_all(0);
1504bae19fddSSandipan Das }
1505bae19fddSSandipan Das 
150639b0332aSBorislav Petkov void amd_pmu_enable_virt(void)
150739b0332aSBorislav Petkov {
150839b0332aSBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
150939b0332aSBorislav Petkov 
151039b0332aSBorislav Petkov 	cpuc->perf_ctr_virt_mask = 0;
151139b0332aSBorislav Petkov 
151239b0332aSBorislav Petkov 	/* Reload all events */
1513bae19fddSSandipan Das 	amd_pmu_reload_virt();
151439b0332aSBorislav Petkov }
151539b0332aSBorislav Petkov EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
151639b0332aSBorislav Petkov 
151739b0332aSBorislav Petkov void amd_pmu_disable_virt(void)
151839b0332aSBorislav Petkov {
151939b0332aSBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
152039b0332aSBorislav Petkov 
152139b0332aSBorislav Petkov 	/*
152239b0332aSBorislav Petkov 	 * We only mask out the Host-only bit so that host-only counting works
152339b0332aSBorislav Petkov 	 * when SVM is disabled. If someone sets up a guest-only counter when
152439b0332aSBorislav Petkov 	 * SVM is disabled the Guest-only bits still gets set and the counter
152539b0332aSBorislav Petkov 	 * will not count anything.
152639b0332aSBorislav Petkov 	 */
152739b0332aSBorislav Petkov 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
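	/*
	 * Editor's note (illustrative): the event-programming path writes
	 * roughly (config | enable_bits) & ~perf_ctr_virt_mask to PERF_CTL,
	 * so with SVM disabled the Host-only bit is stripped here while a
	 * stale Guest-only bit keeps such a counter silent.
	 */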
152839b0332aSBorislav Petkov 
152939b0332aSBorislav Petkov 	/* Reload all events */
1530bae19fddSSandipan Das 	amd_pmu_reload_virt();
153139b0332aSBorislav Petkov }
153239b0332aSBorislav Petkov EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
1533