xref: /openbmc/linux/arch/x86/events/intel/lbr.c (revision f7af616c632ee2ac3af0876fe33bf9e0232e665a)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/perf_event.h>
3 #include <linux/types.h>
4 
5 #include <asm/perf_event.h>
6 #include <asm/msr.h>
7 #include <asm/insn.h>
8 
9 #include "../perf_event.h"
10 
11 static const enum {
12 	LBR_EIP_FLAGS		= 1,
13 	LBR_TSX			= 2,
14 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
15 	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
16 	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
17 };
18 
19 /*
20  * Intel LBR_SELECT bits
21  * Intel Vol3a, April 2011, Section 16.7 Table 16-10
22  *
23  * Hardware branch filter (not available on all CPUs)
24  */
25 #define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
26 #define LBR_USER_BIT		1 /* do not capture at ring > 0 */
27 #define LBR_JCC_BIT		2 /* do not capture conditional branches */
28 #define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
29 #define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
30 #define LBR_RETURN_BIT		5 /* do not capture near returns */
31 #define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
32 #define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
33 #define LBR_FAR_BIT		8 /* do not capture far branches */
34 #define LBR_CALL_STACK_BIT	9 /* enable call stack */
35 
36 /*
37  * The following bit only exists in Linux; we mask it out before writing it
38  * to the actual MSR, but it helps the perf constraint code understand that
39  * this is a separate configuration.
40  */
41 #define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */
42 
43 #define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
44 #define LBR_USER	(1 << LBR_USER_BIT)
45 #define LBR_JCC		(1 << LBR_JCC_BIT)
46 #define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
47 #define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
48 #define LBR_RETURN	(1 << LBR_RETURN_BIT)
49 #define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
50 #define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
51 #define LBR_FAR		(1 << LBR_FAR_BIT)
52 #define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
53 #define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)
54 
55 #define LBR_PLM (LBR_KERNEL | LBR_USER)
56 
57 #define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
58 #define LBR_NOT_SUPP	-1	/* LBR filter not supported */
59 #define LBR_IGN		0	/* ignored */
60 
61 #define LBR_ANY		 \
62 	(LBR_JCC	|\
63 	 LBR_REL_CALL	|\
64 	 LBR_IND_CALL	|\
65 	 LBR_RETURN	|\
66 	 LBR_REL_JMP	|\
67 	 LBR_IND_JMP	|\
68 	 LBR_FAR)
69 
70 #define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
71 #define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
72 #define LBR_FROM_FLAG_ABORT	BIT_ULL(61)
73 
74 #define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))
75 
76 /*
77  * x86 control flow change classification
78  * x86 control flow changes include branches, interrupts, traps, faults
79  */
80 enum {
81 	X86_BR_NONE		= 0,      /* unknown */
82 
83 	X86_BR_USER		= 1 << 0, /* branch target is user */
84 	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */
85 
86 	X86_BR_CALL		= 1 << 2, /* call */
87 	X86_BR_RET		= 1 << 3, /* return */
88 	X86_BR_SYSCALL		= 1 << 4, /* syscall */
89 	X86_BR_SYSRET		= 1 << 5, /* syscall return */
90 	X86_BR_INT		= 1 << 6, /* sw interrupt */
91 	X86_BR_IRET		= 1 << 7, /* return from interrupt */
92 	X86_BR_JCC		= 1 << 8, /* conditional */
93 	X86_BR_JMP		= 1 << 9, /* jump */
94 	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
95 	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
96 	X86_BR_ABORT		= 1 << 12,/* transaction abort */
97 	X86_BR_IN_TX		= 1 << 13,/* in transaction */
98 	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
99 	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
100 	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
101 	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
102 
103 	X86_BR_TYPE_SAVE	= 1 << 18,/* indicate to save branch type */
104 
105 };
106 
107 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
108 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
109 
110 #define X86_BR_ANY       \
111 	(X86_BR_CALL    |\
112 	 X86_BR_RET     |\
113 	 X86_BR_SYSCALL |\
114 	 X86_BR_SYSRET  |\
115 	 X86_BR_INT     |\
116 	 X86_BR_IRET    |\
117 	 X86_BR_JCC     |\
118 	 X86_BR_JMP	 |\
119 	 X86_BR_IRQ	 |\
120 	 X86_BR_ABORT	 |\
121 	 X86_BR_IND_CALL |\
122 	 X86_BR_IND_JMP  |\
123 	 X86_BR_ZERO_CALL)
124 
125 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
126 
127 #define X86_BR_ANY_CALL		 \
128 	(X86_BR_CALL		|\
129 	 X86_BR_IND_CALL	|\
130 	 X86_BR_ZERO_CALL	|\
131 	 X86_BR_SYSCALL		|\
132 	 X86_BR_IRQ		|\
133 	 X86_BR_INT)
134 
135 /*
136  * Intel LBR_CTL bits
137  *
138  * Hardware branch filter for Arch LBR
139  */
140 #define ARCH_LBR_KERNEL_BIT		1  /* capture at ring0 */
141 #define ARCH_LBR_USER_BIT		2  /* capture at ring > 0 */
142 #define ARCH_LBR_CALL_STACK_BIT		3  /* enable call stack */
143 #define ARCH_LBR_JCC_BIT		16 /* capture conditional branches */
144 #define ARCH_LBR_REL_JMP_BIT		17 /* capture relative jumps */
145 #define ARCH_LBR_IND_JMP_BIT		18 /* capture indirect jumps */
146 #define ARCH_LBR_REL_CALL_BIT		19 /* capture relative calls */
147 #define ARCH_LBR_IND_CALL_BIT		20 /* capture indirect calls */
148 #define ARCH_LBR_RETURN_BIT		21 /* capture near returns */
149 #define ARCH_LBR_OTHER_BRANCH_BIT	22 /* capture other branches */
150 
151 #define ARCH_LBR_KERNEL			(1ULL << ARCH_LBR_KERNEL_BIT)
152 #define ARCH_LBR_USER			(1ULL << ARCH_LBR_USER_BIT)
153 #define ARCH_LBR_CALL_STACK		(1ULL << ARCH_LBR_CALL_STACK_BIT)
154 #define ARCH_LBR_JCC			(1ULL << ARCH_LBR_JCC_BIT)
155 #define ARCH_LBR_REL_JMP		(1ULL << ARCH_LBR_REL_JMP_BIT)
156 #define ARCH_LBR_IND_JMP		(1ULL << ARCH_LBR_IND_JMP_BIT)
157 #define ARCH_LBR_REL_CALL		(1ULL << ARCH_LBR_REL_CALL_BIT)
158 #define ARCH_LBR_IND_CALL		(1ULL << ARCH_LBR_IND_CALL_BIT)
159 #define ARCH_LBR_RETURN			(1ULL << ARCH_LBR_RETURN_BIT)
160 #define ARCH_LBR_OTHER_BRANCH		(1ULL << ARCH_LBR_OTHER_BRANCH_BIT)
161 
162 #define ARCH_LBR_ANY			 \
163 	(ARCH_LBR_JCC			|\
164 	 ARCH_LBR_REL_JMP		|\
165 	 ARCH_LBR_IND_JMP		|\
166 	 ARCH_LBR_REL_CALL		|\
167 	 ARCH_LBR_IND_CALL		|\
168 	 ARCH_LBR_RETURN		|\
169 	 ARCH_LBR_OTHER_BRANCH)
170 
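/* Valid bits in MSR_ARCH_LBR_CTL: the CPL/call-stack bits 1-3 and the branch type filter bits 16-22 defined above */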
171 #define ARCH_LBR_CTL_MASK			0x7f000e
172 
173 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
174 
175 static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
176 {
177 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
178 		return !!(config & ARCH_LBR_CALL_STACK);
179 
180 	return !!(config & LBR_CALL_STACK);
181 }
182 
183 /*
184  * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
185  * otherwise it becomes nearly impossible to get a reliable stack.
186  */
187 
188 static void __intel_pmu_lbr_enable(bool pmi)
189 {
190 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
191 	u64 debugctl, lbr_select = 0, orig_debugctl;
192 
193 	/*
194 	 * No need to unfreeze manually, as v4 can do that as part
195 	 * of the GLOBAL_STATUS ack.
196 	 */
197 	if (pmi && x86_pmu.version >= 4)
198 		return;
199 
200 	/*
201 	 * No need to reprogram LBR_SELECT in a PMI, as it
202 	 * did not change.
203 	 */
204 	if (cpuc->lbr_sel)
205 		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
206 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
207 		wrmsrl(MSR_LBR_SELECT, lbr_select);
208 
209 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
210 	orig_debugctl = debugctl;
211 
212 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
213 		debugctl |= DEBUGCTLMSR_LBR;
214 	/*
215 	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
216 	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
217 	 * may cause superfluous increase/decrease of LBR_TOS.
218 	 */
219 	if (is_lbr_call_stack_bit_set(lbr_select))
220 		debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
221 	else
222 		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
223 
224 	if (orig_debugctl != debugctl)
225 		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
226 
227 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
228 		wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
229 }
230 
231 static void __intel_pmu_lbr_disable(void)
232 {
233 	u64 debugctl;
234 
235 	if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
236 		wrmsrl(MSR_ARCH_LBR_CTL, 0);
237 		return;
238 	}
239 
240 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
241 	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
242 	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
243 }
244 
245 void intel_pmu_lbr_reset_32(void)
246 {
247 	int i;
248 
249 	for (i = 0; i < x86_pmu.lbr_nr; i++)
250 		wrmsrl(x86_pmu.lbr_from + i, 0);
251 }
252 
253 void intel_pmu_lbr_reset_64(void)
254 {
255 	int i;
256 
257 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
258 		wrmsrl(x86_pmu.lbr_from + i, 0);
259 		wrmsrl(x86_pmu.lbr_to   + i, 0);
260 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
261 			wrmsrl(x86_pmu.lbr_info + i, 0);
262 	}
263 }
264 
265 static void intel_pmu_arch_lbr_reset(void)
266 {
267 	/* Writing to the ARCH_LBR_DEPTH MSR resets all LBR entries to 0 */
268 	wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
269 }
270 
271 void intel_pmu_lbr_reset(void)
272 {
273 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
274 
275 	if (!x86_pmu.lbr_nr)
276 		return;
277 
278 	x86_pmu.lbr_reset();
279 
280 	cpuc->last_task_ctx = NULL;
281 	cpuc->last_log_id = 0;
282 }
283 
284 /*
285  * TOS = most recently recorded branch
286  */
287 static inline u64 intel_pmu_lbr_tos(void)
288 {
289 	u64 tos;
290 
291 	rdmsrl(x86_pmu.lbr_tos, tos);
292 	return tos;
293 }
294 
295 enum {
296 	LBR_NONE,
297 	LBR_VALID,
298 };
299 
300 /*
301  * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
302  * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
303  * TSX is not supported they have no consistent behavior:
304  *
305  *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
306  *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
307  *     part of the sign extension.
308  *
309  * Therefore, if:
310  *
311  *   1) LBR has TSX format
312  *   2) CPU has no TSX support enabled
313  *
314  * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
315  * value from rdmsr() must be converted to have a 61-bit sign extension,
316  * ignoring the TSX flags.
317  */
318 static inline bool lbr_from_signext_quirk_needed(void)
319 {
320 	int lbr_format = x86_pmu.intel_cap.lbr_format;
321 	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
322 			   boot_cpu_has(X86_FEATURE_RTM);
323 
324 	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
325 }
326 
327 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
328 
329 /* If quirk is enabled, ensure sign extension is 63 bits: */
330 inline u64 lbr_from_signext_quirk_wr(u64 val)
331 {
332 	if (static_branch_unlikely(&lbr_from_quirk_key)) {
333 		/*
334 		 * Sign extend into bits 61:62 while preserving bit 63.
335 		 *
336 		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
337 		 * in val are always OFF and must be changed to be sign
338 		 * extension bits. Since bits 59:60 are guaranteed to be
339 		 * part of the sign extension bits, we can just copy them
340 		 * to 61:62.
341 		 */
342 		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
343 	}
344 	return val;
345 }
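
/*
 * Illustration (hypothetical value): with the quirk active, writing
 * val = 0x9800000000000000 (bits 63, 60 and 59 set) yields
 * 0xf800000000000000, i.e. bits 59:60 are copied into 61:62 so the sign
 * extension runs through the unused TSX flag positions while bit 63
 * (MISPRED) is left untouched.
 */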
346 
347 /*
348  * If quirk is needed, ensure sign extension is 61 bits:
349  */
350 static u64 lbr_from_signext_quirk_rd(u64 val)
351 {
352 	if (static_branch_unlikely(&lbr_from_quirk_key)) {
353 		/*
354 		 * Quirk is on when TSX is not enabled. Therefore TSX
355 		 * flags must be read as OFF.
356 		 */
357 		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
358 	}
359 	return val;
360 }
361 
362 static __always_inline void wrlbr_from(unsigned int idx, u64 val)
363 {
364 	val = lbr_from_signext_quirk_wr(val);
365 	wrmsrl(x86_pmu.lbr_from + idx, val);
366 }
367 
368 static __always_inline void wrlbr_to(unsigned int idx, u64 val)
369 {
370 	wrmsrl(x86_pmu.lbr_to + idx, val);
371 }
372 
373 static __always_inline void wrlbr_info(unsigned int idx, u64 val)
374 {
375 	wrmsrl(x86_pmu.lbr_info + idx, val);
376 }
377 
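/*
 * The rdlbr_*() helpers read from a cached lbr_entry when one is supplied
 * (e.g. entries captured via XSAVES or a PEBS record) and fall back to the
 * LBR MSRs when @lbr is NULL.
 */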
378 static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
379 {
380 	u64 val;
381 
382 	if (lbr)
383 		return lbr->from;
384 
385 	rdmsrl(x86_pmu.lbr_from + idx, val);
386 
387 	return lbr_from_signext_quirk_rd(val);
388 }
389 
390 static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
391 {
392 	u64 val;
393 
394 	if (lbr)
395 		return lbr->to;
396 
397 	rdmsrl(x86_pmu.lbr_to + idx, val);
398 
399 	return val;
400 }
401 
402 static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
403 {
404 	u64 val;
405 
406 	if (lbr)
407 		return lbr->info;
408 
409 	rdmsrl(x86_pmu.lbr_info + idx, val);
410 
411 	return val;
412 }
413 
414 static inline void
415 wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
416 {
417 	wrlbr_from(idx, lbr->from);
418 	wrlbr_to(idx, lbr->to);
419 	if (need_info)
420 		wrlbr_info(idx, lbr->info);
421 }
422 
423 static inline bool
424 rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
425 {
426 	u64 from = rdlbr_from(idx, NULL);
427 
428 	/* Don't read invalid entry */
429 	if (!from)
430 		return false;
431 
432 	lbr->from = from;
433 	lbr->to = rdlbr_to(idx, NULL);
434 	if (need_info)
435 		lbr->info = rdlbr_info(idx, NULL);
436 
437 	return true;
438 }
439 
440 void intel_pmu_lbr_restore(void *ctx)
441 {
442 	bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
443 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
444 	struct x86_perf_task_context *task_ctx = ctx;
445 	int i;
446 	unsigned lbr_idx, mask;
447 	u64 tos = task_ctx->tos;
448 
449 	mask = x86_pmu.lbr_nr - 1;
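	/* Walk the LBR ring buffer from the saved TOS backwards through the valid entries. */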
450 	for (i = 0; i < task_ctx->valid_lbrs; i++) {
451 		lbr_idx = (tos - i) & mask;
452 		wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
453 	}
454 
455 	for (; i < x86_pmu.lbr_nr; i++) {
456 		lbr_idx = (tos - i) & mask;
457 		wrlbr_from(lbr_idx, 0);
458 		wrlbr_to(lbr_idx, 0);
459 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
460 			wrlbr_info(lbr_idx, 0);
461 	}
462 
463 	wrmsrl(x86_pmu.lbr_tos, tos);
464 
465 	if (cpuc->lbr_select)
466 		wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
467 }
468 
469 static void intel_pmu_arch_lbr_restore(void *ctx)
470 {
471 	struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
472 	struct lbr_entry *entries = task_ctx->entries;
473 	int i;
474 
475 	/* Fast reset the LBRs before restore if the call stack is not full. */
476 	if (!entries[x86_pmu.lbr_nr - 1].from)
477 		intel_pmu_arch_lbr_reset();
478 
479 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
480 		if (!entries[i].from)
481 			break;
482 		wrlbr_all(&entries[i], i, true);
483 	}
484 }
485 
486 /*
487  * Restore the Architecture LBR state from the xsave area in the perf
488  * context data for the task via the XRSTORS instruction.
489  */
490 static void intel_pmu_arch_lbr_xrstors(void *ctx)
491 {
492 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
493 
494 	copy_kernel_to_dynamic_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
495 }
496 
497 static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
498 {
499 	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
500 		return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);
501 
502 	return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
503 }
504 
505 static void __intel_pmu_lbr_restore(void *ctx)
506 {
507 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
508 
509 	if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
510 	    task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
511 		intel_pmu_lbr_reset();
512 		return;
513 	}
514 
515 	/*
516 	 * Do not restore the LBR registers if:
517 	 * - no one else touched them, and
518 	 * - they were not cleared in a deep C-state
519 	 */
520 	if ((ctx == cpuc->last_task_ctx) &&
521 	    (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
522 	    !lbr_is_reset_in_cstate(ctx)) {
523 		task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
524 		return;
525 	}
526 
527 	x86_pmu.lbr_restore(ctx);
528 
529 	task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
530 }
531 
532 void intel_pmu_lbr_save(void *ctx)
533 {
534 	bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
535 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
536 	struct x86_perf_task_context *task_ctx = ctx;
537 	unsigned lbr_idx, mask;
538 	u64 tos;
539 	int i;
540 
541 	mask = x86_pmu.lbr_nr - 1;
542 	tos = intel_pmu_lbr_tos();
543 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
544 		lbr_idx = (tos - i) & mask;
545 		if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
546 			break;
547 	}
548 	task_ctx->valid_lbrs = i;
549 	task_ctx->tos = tos;
550 
551 	if (cpuc->lbr_select)
552 		rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
553 }
554 
555 static void intel_pmu_arch_lbr_save(void *ctx)
556 {
557 	struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
558 	struct lbr_entry *entries = task_ctx->entries;
559 	int i;
560 
561 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
562 		if (!rdlbr_all(&entries[i], i, true))
563 			break;
564 	}
565 
566 	/* LBR call stack is not full. Reset is required in restore. */
567 	if (i < x86_pmu.lbr_nr)
568 		entries[x86_pmu.lbr_nr - 1].from = 0;
569 }
570 
571 /*
572  * Save the Architecture LBR state to the xsave area in the perf
573  * context data for the task via the XSAVES instruction.
574  */
575 static void intel_pmu_arch_lbr_xsaves(void *ctx)
576 {
577 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
578 
579 	copy_dynamic_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
580 }
581 
582 static void __intel_pmu_lbr_save(void *ctx)
583 {
584 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
585 
586 	if (task_context_opt(ctx)->lbr_callstack_users == 0) {
587 		task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
588 		return;
589 	}
590 
591 	x86_pmu.lbr_save(ctx);
592 
593 	task_context_opt(ctx)->lbr_stack_state = LBR_VALID;
594 
595 	cpuc->last_task_ctx = ctx;
596 	cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
597 }
598 
599 void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
600 				 struct perf_event_context *next)
601 {
602 	void *prev_ctx_data, *next_ctx_data;
603 
604 	swap(prev->task_ctx_data, next->task_ctx_data);
605 
606 	/*
607 	 * Architecture-specific synchronization only makes sense when
608 	 * both the prev->task_ctx_data and next->task_ctx_data
609 	 * pointers are allocated.
610 	 */
611 
612 	prev_ctx_data = next->task_ctx_data;
613 	next_ctx_data = prev->task_ctx_data;
614 
615 	if (!prev_ctx_data || !next_ctx_data)
616 		return;
617 
618 	swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
619 	     task_context_opt(next_ctx_data)->lbr_callstack_users);
620 }
621 
622 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
623 {
624 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
625 	void *task_ctx;
626 
627 	if (!cpuc->lbr_users)
628 		return;
629 
630 	/*
631 	 * If LBR callstack feature is enabled and the stack was saved when
632 	 * the task was scheduled out, restore the stack. Otherwise flush
633 	 * the LBR stack.
634 	 */
635 	task_ctx = ctx ? ctx->task_ctx_data : NULL;
636 	if (task_ctx) {
637 		if (sched_in)
638 			__intel_pmu_lbr_restore(task_ctx);
639 		else
640 			__intel_pmu_lbr_save(task_ctx);
641 		return;
642 	}
643 
644 	/*
645 	 * Since a context switch can flip the address space and LBR entries
646 	 * are not tagged with an identifier, we need to wipe the LBR, even for
647 	 * per-cpu events. You simply cannot resolve the branches from the old
648 	 * address space.
649 	 */
650 	if (sched_in)
651 		intel_pmu_lbr_reset();
652 }
653 
654 static inline bool branch_user_callstack(unsigned br_sel)
655 {
656 	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
657 }
658 
659 void intel_pmu_lbr_add(struct perf_event *event)
660 {
661 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
662 
663 	if (!x86_pmu.lbr_nr)
664 		return;
665 
666 	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
667 		cpuc->lbr_select = 1;
668 
669 	cpuc->br_sel = event->hw.branch_reg.reg;
670 
671 	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
672 		task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
673 
674 	/*
675 	 * Request pmu::sched_task() callback, which will fire inside the
676 	 * regular perf event scheduling, so that call will:
677 	 *
678 	 *  - restore or wipe; when LBR-callstack,
679 	 *  - wipe; otherwise,
680 	 *
681 	 * when this is from __perf_event_task_sched_in().
682 	 *
683 	 * However, if this is from perf_install_in_context(), no such callback
684 	 * will follow and we'll need to reset the LBR here if this is the
685 	 * first LBR event.
686 	 *
687 	 * The problem is, we cannot tell these cases apart... but we can
688 	 * exclude the biggest chunk of cases by looking at
689 	 * event->total_time_running. An event that has accrued runtime cannot
690 	 * be 'new'. Conversely, a new event can get installed through the
691 	 * context switch path for the first time.
692 	 */
693 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
694 		cpuc->lbr_pebs_users++;
695 	perf_sched_cb_inc(event->ctx->pmu);
696 	if (!cpuc->lbr_users++ && !event->total_time_running)
697 		intel_pmu_lbr_reset();
698 }
699 
700 void release_lbr_buffers(void)
701 {
702 	struct kmem_cache *kmem_cache;
703 	struct cpu_hw_events *cpuc;
704 	int cpu;
705 
706 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
707 		return;
708 
709 	for_each_possible_cpu(cpu) {
710 		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
711 		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
712 		if (kmem_cache && cpuc->lbr_xsave) {
713 			kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
714 			cpuc->lbr_xsave = NULL;
715 		}
716 	}
717 }
718 
719 void reserve_lbr_buffers(void)
720 {
721 	struct kmem_cache *kmem_cache;
722 	struct cpu_hw_events *cpuc;
723 	int cpu;
724 
725 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
726 		return;
727 
728 	for_each_possible_cpu(cpu) {
729 		cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
730 		kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
731 		if (!kmem_cache || cpuc->lbr_xsave)
732 			continue;
733 
734 		cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
735 							cpu_to_node(cpu));
736 	}
737 }
738 
739 void intel_pmu_lbr_del(struct perf_event *event)
740 {
741 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
742 
743 	if (!x86_pmu.lbr_nr)
744 		return;
745 
746 	if (branch_user_callstack(cpuc->br_sel) &&
747 	    event->ctx->task_ctx_data)
748 		task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
749 
750 	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
751 		cpuc->lbr_select = 0;
752 
753 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
754 		cpuc->lbr_pebs_users--;
755 	cpuc->lbr_users--;
756 	WARN_ON_ONCE(cpuc->lbr_users < 0);
757 	WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
758 	perf_sched_cb_dec(event->ctx->pmu);
759 }
760 
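/*
 * True when the fixed vLBR counter is claimed for a guest (e.g. by KVM's
 * vPMU), in which case the host must not touch the LBR MSRs.
 */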
761 static inline bool vlbr_exclude_host(void)
762 {
763 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
764 
765 	return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
766 		(unsigned long *)&cpuc->intel_ctrl_guest_mask);
767 }
768 
769 void intel_pmu_lbr_enable_all(bool pmi)
770 {
771 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
772 
773 	if (cpuc->lbr_users && !vlbr_exclude_host())
774 		__intel_pmu_lbr_enable(pmi);
775 }
776 
777 void intel_pmu_lbr_disable_all(void)
778 {
779 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
780 
781 	if (cpuc->lbr_users && !vlbr_exclude_host())
782 		__intel_pmu_lbr_disable();
783 }
784 
785 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
786 {
787 	unsigned long mask = x86_pmu.lbr_nr - 1;
788 	u64 tos = intel_pmu_lbr_tos();
789 	int i;
790 
791 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
792 		unsigned long lbr_idx = (tos - i) & mask;
793 		union {
794 			struct {
795 				u32 from;
796 				u32 to;
797 			};
798 			u64     lbr;
799 		} msr_lastbranch;
800 
801 		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
802 
803 		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
804 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
805 		cpuc->lbr_entries[i].mispred	= 0;
806 		cpuc->lbr_entries[i].predicted	= 0;
807 		cpuc->lbr_entries[i].in_tx	= 0;
808 		cpuc->lbr_entries[i].abort	= 0;
809 		cpuc->lbr_entries[i].cycles	= 0;
810 		cpuc->lbr_entries[i].type	= 0;
811 		cpuc->lbr_entries[i].reserved	= 0;
812 	}
813 	cpuc->lbr_stack.nr = i;
814 	cpuc->lbr_stack.hw_idx = tos;
815 }
816 
817 /*
818  * Due to lack of segmentation in Linux the effective address (offset)
819  * is the same as the linear address, allowing us to merge the LIP and EIP
820  * LBR formats.
821  */
822 void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
823 {
824 	bool need_info = false, call_stack = false;
825 	unsigned long mask = x86_pmu.lbr_nr - 1;
826 	int lbr_format = x86_pmu.intel_cap.lbr_format;
827 	u64 tos = intel_pmu_lbr_tos();
828 	int i;
829 	int out = 0;
830 	int num = x86_pmu.lbr_nr;
831 
832 	if (cpuc->lbr_sel) {
833 		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
834 		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
835 			call_stack = true;
836 	}
837 
838 	for (i = 0; i < num; i++) {
839 		unsigned long lbr_idx = (tos - i) & mask;
840 		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
841 		int skip = 0;
842 		u16 cycles = 0;
843 		int lbr_flags = lbr_desc[lbr_format];
844 
845 		from = rdlbr_from(lbr_idx, NULL);
846 		to   = rdlbr_to(lbr_idx, NULL);
847 
848 		/*
849 		 * Read LBR call stack entries
850 		 * until invalid entry (0s) is detected.
851 		 */
852 		if (call_stack && !from)
853 			break;
854 
855 		if (lbr_format == LBR_FORMAT_INFO && need_info) {
856 			u64 info;
857 
858 			info = rdlbr_info(lbr_idx, NULL);
859 			mis = !!(info & LBR_INFO_MISPRED);
860 			pred = !mis;
861 			in_tx = !!(info & LBR_INFO_IN_TX);
862 			abort = !!(info & LBR_INFO_ABORT);
863 			cycles = (info & LBR_INFO_CYCLES);
864 		}
865 
866 		if (lbr_format == LBR_FORMAT_TIME) {
867 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
868 			pred = !mis;
869 			skip = 1;
870 			cycles = ((to >> 48) & LBR_INFO_CYCLES);
871 
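			/* The upper 16 bits of TO hold the cycle count; sign-extend the target address back. */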
872 			to = (u64)((((s64)to) << 16) >> 16);
873 		}
874 
875 		if (lbr_flags & LBR_EIP_FLAGS) {
876 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
877 			pred = !mis;
878 			skip = 1;
879 		}
880 		if (lbr_flags & LBR_TSX) {
881 			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
882 			abort = !!(from & LBR_FROM_FLAG_ABORT);
883 			skip = 3;
884 		}
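		/* Shift the flag bits out and sign-extend to recover the canonical from address. */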
885 		from = (u64)((((s64)from) << skip) >> skip);
886 
887 		/*
888 		 * Some CPUs report duplicated abort records,
889 		 * with the second entry not having an abort bit set.
890 		 * Skip them here. This loop runs backwards,
891 		 * so we need to undo the previous record.
892 		 * If the abort just happened outside the window
893 		 * the extra entry cannot be removed.
894 		 */
895 		if (abort && x86_pmu.lbr_double_abort && out > 0)
896 			out--;
897 
898 		cpuc->lbr_entries[out].from	 = from;
899 		cpuc->lbr_entries[out].to	 = to;
900 		cpuc->lbr_entries[out].mispred	 = mis;
901 		cpuc->lbr_entries[out].predicted = pred;
902 		cpuc->lbr_entries[out].in_tx	 = in_tx;
903 		cpuc->lbr_entries[out].abort	 = abort;
904 		cpuc->lbr_entries[out].cycles	 = cycles;
905 		cpuc->lbr_entries[out].type	 = 0;
906 		cpuc->lbr_entries[out].reserved	 = 0;
907 		out++;
908 	}
909 	cpuc->lbr_stack.nr = out;
910 	cpuc->lbr_stack.hw_idx = tos;
911 }
912 
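/*
 * On Arch LBR, the get_lbr_*() helpers below only report the LBR_INFO
 * fields that the CPUID leaf actually enumerates (branch type, mispredict,
 * cycle count); unsupported fields read as 0.
 */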
913 static __always_inline int get_lbr_br_type(u64 info)
914 {
915 	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
916 		return 0;
917 
918 	return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
919 }
920 
921 static __always_inline bool get_lbr_mispred(u64 info)
922 {
923 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
924 		return 0;
925 
926 	return !!(info & LBR_INFO_MISPRED);
927 }
928 
929 static __always_inline bool get_lbr_predicted(u64 info)
930 {
931 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
932 		return 0;
933 
934 	return !(info & LBR_INFO_MISPRED);
935 }
936 
937 static __always_inline u16 get_lbr_cycles(u64 info)
938 {
939 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
940 	    !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
941 		return 0;
942 
943 	return info & LBR_INFO_CYCLES;
944 }
945 
946 static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
947 				struct lbr_entry *entries)
948 {
949 	struct perf_branch_entry *e;
950 	struct lbr_entry *lbr;
951 	u64 from, to, info;
952 	int i;
953 
954 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
955 		lbr = entries ? &entries[i] : NULL;
956 		e = &cpuc->lbr_entries[i];
957 
958 		from = rdlbr_from(i, lbr);
959 		/*
960 		 * Read LBR entries until invalid entry (0s) is detected.
961 		 */
962 		if (!from)
963 			break;
964 
965 		to = rdlbr_to(i, lbr);
966 		info = rdlbr_info(i, lbr);
967 
968 		e->from		= from;
969 		e->to		= to;
970 		e->mispred	= get_lbr_mispred(info);
971 		e->predicted	= get_lbr_predicted(info);
972 		e->in_tx	= !!(info & LBR_INFO_IN_TX);
973 		e->abort	= !!(info & LBR_INFO_ABORT);
974 		e->cycles	= get_lbr_cycles(info);
975 		e->type		= get_lbr_br_type(info);
976 		e->reserved	= 0;
977 	}
978 
979 	cpuc->lbr_stack.nr = i;
980 }
981 
982 static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
983 {
984 	intel_pmu_store_lbr(cpuc, NULL);
985 }
986 
987 static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
988 {
989 	struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;
990 
991 	if (!xsave) {
992 		intel_pmu_store_lbr(cpuc, NULL);
993 		return;
994 	}
995 	copy_dynamic_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
996 
997 	intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
998 }
999 
1000 void intel_pmu_lbr_read(void)
1001 {
1002 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1003 
1004 	/*
1005 	 * Don't read when all LBR users are using adaptive PEBS.
1006 	 *
1007 	 * This could be smarter and actually check the event,
1008 	 * but this simple approach seems to work for now.
1009 	 */
1010 	if (!cpuc->lbr_users || vlbr_exclude_host() ||
1011 	    cpuc->lbr_users == cpuc->lbr_pebs_users)
1012 		return;
1013 
1014 	x86_pmu.lbr_read(cpuc);
1015 
1016 	intel_pmu_lbr_filter(cpuc);
1017 }
1018 
1019 /*
1020  * SW filter is used:
1021  * - in case there is no HW filter
1022  * - in case the HW filter has errata or limitations
1023  */
1024 static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
1025 {
1026 	u64 br_type = event->attr.branch_sample_type;
1027 	int mask = 0;
1028 
1029 	if (br_type & PERF_SAMPLE_BRANCH_USER)
1030 		mask |= X86_BR_USER;
1031 
1032 	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
1033 		mask |= X86_BR_KERNEL;
1034 
1035 	/* we ignore BRANCH_HV here */
1036 
1037 	if (br_type & PERF_SAMPLE_BRANCH_ANY)
1038 		mask |= X86_BR_ANY;
1039 
1040 	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
1041 		mask |= X86_BR_ANY_CALL;
1042 
1043 	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
1044 		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
1045 
1046 	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
1047 		mask |= X86_BR_IND_CALL;
1048 
1049 	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
1050 		mask |= X86_BR_ABORT;
1051 
1052 	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
1053 		mask |= X86_BR_IN_TX;
1054 
1055 	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
1056 		mask |= X86_BR_NO_TX;
1057 
1058 	if (br_type & PERF_SAMPLE_BRANCH_COND)
1059 		mask |= X86_BR_JCC;
1060 
1061 	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
1062 		if (!x86_pmu_has_lbr_callstack())
1063 			return -EOPNOTSUPP;
1064 		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
1065 			return -EINVAL;
1066 		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
1067 			X86_BR_CALL_STACK;
1068 	}
1069 
1070 	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
1071 		mask |= X86_BR_IND_JMP;
1072 
1073 	if (br_type & PERF_SAMPLE_BRANCH_CALL)
1074 		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
1075 
1076 	if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
1077 		mask |= X86_BR_TYPE_SAVE;
1078 
1079 	/*
1080 	 * stash the actual user request into reg; it may
1081 	 * be used by fixup code for some CPUs
1082 	 */
1083 	event->hw.branch_reg.reg = mask;
1084 	return 0;
1085 }
1086 
1087 /*
1088  * setup the HW LBR filter
1089  * Used only when available, may not be enough to disambiguate
1090  * all branches, may need the help of the SW filter
1091  */
1092 static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
1093 {
1094 	struct hw_perf_event_extra *reg;
1095 	u64 br_type = event->attr.branch_sample_type;
1096 	u64 mask = 0, v;
1097 	int i;
1098 
1099 	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
1100 		if (!(br_type & (1ULL << i)))
1101 			continue;
1102 
1103 		v = x86_pmu.lbr_sel_map[i];
1104 		if (v == LBR_NOT_SUPP)
1105 			return -EOPNOTSUPP;
1106 
1107 		if (v != LBR_IGN)
1108 			mask |= v;
1109 	}
1110 
1111 	reg = &event->hw.branch_reg;
1112 	reg->idx = EXTRA_REG_LBR;
1113 
1114 	if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
1115 		reg->config = mask;
1116 		return 0;
1117 	}
1118 
1119 	/*
1120 	 * The first 9 bits (bits 0-8) of LBR_SEL_MASK in LBR_SELECT operate
1121 	 * in suppress mode, so LBR_SELECT should be set to
1122 	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK).
1123 	 * But the 10th bit, LBR_CALL_STACK, does not operate
1124 	 * in suppress mode.
1125 	 */
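	/*
	 * E.g. if only conditional branches were requested, mask has the JCC
	 * bit set; the XOR then clears the "suppress JCC" bit and sets the
	 * suppress bit for every other branch type, so only JCC branches are
	 * captured (the priv-level bits are handled the same way).
	 */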
1126 	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
1127 
1128 	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
1129 	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
1130 	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
1131 		reg->config |= LBR_NO_INFO;
1132 
1133 	return 0;
1134 }
1135 
1136 int intel_pmu_setup_lbr_filter(struct perf_event *event)
1137 {
1138 	int ret = 0;
1139 
1140 	/*
1141 	 * no LBR on this PMU
1142 	 */
1143 	if (!x86_pmu.lbr_nr)
1144 		return -EOPNOTSUPP;
1145 
1146 	/*
1147 	 * setup SW LBR filter
1148 	 */
1149 	ret = intel_pmu_setup_sw_lbr_filter(event);
1150 	if (ret)
1151 		return ret;
1152 
1153 	/*
1154 	 * setup HW LBR filter, if any
1155 	 */
1156 	if (x86_pmu.lbr_sel_map)
1157 		ret = intel_pmu_setup_hw_lbr_filter(event);
1158 
1159 	return ret;
1160 }
1161 
1162 /*
1163  * return the type of control flow change at address "from";
1164  * the instruction is not necessarily a branch (in case of interrupt).
1165  *
1166  * The branch type returned also includes the priv level of the
1167  * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
1168  *
1169  * If a branch type is unknown OR the instruction cannot be
1170  * decoded (e.g., text page not present), then X86_BR_NONE is
1171  * returned.
1172  */
1173 static int branch_type(unsigned long from, unsigned long to, int abort)
1174 {
1175 	struct insn insn;
1176 	void *addr;
1177 	int bytes_read, bytes_left;
1178 	int ret = X86_BR_NONE;
1179 	int ext, to_plm, from_plm;
1180 	u8 buf[MAX_INSN_SIZE];
1181 	int is64 = 0;
1182 
1183 	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1184 	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
1185 
1186 	/*
1187 	 * may be zero if the lbr did not fill up after a reset by the time
1188 	 * we get a PMU interrupt
1189 	 */
1190 	if (from == 0 || to == 0)
1191 		return X86_BR_NONE;
1192 
1193 	if (abort)
1194 		return X86_BR_ABORT | to_plm;
1195 
1196 	if (from_plm == X86_BR_USER) {
1197 		/*
1198 		 * can happen if measuring at the user level only
1199 		 * and we interrupt in a kernel thread, e.g., idle.
1200 		 */
1201 		if (!current->mm)
1202 			return X86_BR_NONE;
1203 
1204 		/* may fail if text not present */
1205 		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
1206 						MAX_INSN_SIZE);
1207 		bytes_read = MAX_INSN_SIZE - bytes_left;
1208 		if (!bytes_read)
1209 			return X86_BR_NONE;
1210 
1211 		addr = buf;
1212 	} else {
1213 		/*
1214 		 * The LBR logs any address in the IP, even if the IP just
1215 		 * faulted. This means userspace can control the from address.
1216 		 * Ensure we don't blindly read any address by validating it is
1217 		 * a known text address.
1218 		 */
1219 		if (kernel_text_address(from)) {
1220 			addr = (void *)from;
1221 			/*
1222 			 * Assume we can get the maximum possible size
1223 			 * when grabbing kernel data.  This is not
1224 			 * _strictly_ true since we could possibly be
1225 			 * executing up next to a memory hole, but
1226 			 * it is very unlikely to be a problem.
1227 			 */
1228 			bytes_read = MAX_INSN_SIZE;
1229 		} else {
1230 			return X86_BR_NONE;
1231 		}
1232 	}
1233 
1234 	/*
1235 	 * decoder needs to know the ABI especially
1236 	 * on 64-bit systems running 32-bit apps
1237 	 */
1238 #ifdef CONFIG_X86_64
1239 	is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
1240 #endif
1241 	insn_init(&insn, addr, bytes_read, is64);
1242 	if (insn_get_opcode(&insn))
1243 		return X86_BR_ABORT;
1244 
1245 	switch (insn.opcode.bytes[0]) {
1246 	case 0xf:
1247 		switch (insn.opcode.bytes[1]) {
1248 		case 0x05: /* syscall */
1249 		case 0x34: /* sysenter */
1250 			ret = X86_BR_SYSCALL;
1251 			break;
1252 		case 0x07: /* sysret */
1253 		case 0x35: /* sysexit */
1254 			ret = X86_BR_SYSRET;
1255 			break;
1256 		case 0x80 ... 0x8f: /* conditional */
1257 			ret = X86_BR_JCC;
1258 			break;
1259 		default:
1260 			ret = X86_BR_NONE;
1261 		}
1262 		break;
1263 	case 0x70 ... 0x7f: /* conditional */
1264 		ret = X86_BR_JCC;
1265 		break;
1266 	case 0xc2: /* near ret */
1267 	case 0xc3: /* near ret */
1268 	case 0xca: /* far ret */
1269 	case 0xcb: /* far ret */
1270 		ret = X86_BR_RET;
1271 		break;
1272 	case 0xcf: /* iret */
1273 		ret = X86_BR_IRET;
1274 		break;
1275 	case 0xcc ... 0xce: /* int */
1276 		ret = X86_BR_INT;
1277 		break;
1278 	case 0xe8: /* call near rel */
1279 		if (insn_get_immediate(&insn) || insn.immediate1.value == 0) {
1280 			/* zero length call */
1281 			ret = X86_BR_ZERO_CALL;
1282 			break;
1283 		}
1284 		fallthrough;
1285 	case 0x9a: /* call far absolute */
1286 		ret = X86_BR_CALL;
1287 		break;
1288 	case 0xe0 ... 0xe3: /* loop jmp */
1289 		ret = X86_BR_JCC;
1290 		break;
1291 	case 0xe9 ... 0xeb: /* jmp */
1292 		ret = X86_BR_JMP;
1293 		break;
1294 	case 0xff: /* call near absolute, call far absolute ind */
1295 		if (insn_get_modrm(&insn))
1296 			return X86_BR_ABORT;
1297 
1298 		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
1299 		switch (ext) {
1300 		case 2: /* near ind call */
1301 		case 3: /* far ind call */
1302 			ret = X86_BR_IND_CALL;
1303 			break;
1304 		case 4:
1305 		case 5:
1306 			ret = X86_BR_IND_JMP;
1307 			break;
1308 		}
1309 		break;
1310 	default:
1311 		ret = X86_BR_NONE;
1312 	}
1313 	/*
1314 	 * interrupts, traps, faults (and thus ring transitions) may
1315 	 * occur on any instruction. Thus, to classify them correctly,
1316 	 * we need to first look at the from and to priv levels. If they
1317 	 * are different and to is in the kernel, then it indicates
1318 	 * a ring transition. If the from instruction is not a ring
1319 	 * transition instr (syscall, sysenter, int), then it means
1320 	 * it was an irq, trap or fault.
1321 	 *
1322 	 * we have no way of detecting kernel to kernel faults.
1323 	 */
1324 	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
1325 	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
1326 		ret = X86_BR_IRQ;
1327 
1328 	/*
1329 	 * branch priv level determined by target as
1330 	 * is done by HW when LBR_SELECT is implemented
1331 	 */
1332 	if (ret != X86_BR_NONE)
1333 		ret |= to_plm;
1334 
1335 	return ret;
1336 }
1337 
1338 #define X86_BR_TYPE_MAP_MAX	16
1339 
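/*
 * Maps an X86_BR_* flag to a generic PERF_BR_* type; the table is indexed
 * by the flag's bit position minus the two priv-level bits (see
 * common_branch_type() below).
 */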
1340 static int branch_map[X86_BR_TYPE_MAP_MAX] = {
1341 	PERF_BR_CALL,		/* X86_BR_CALL */
1342 	PERF_BR_RET,		/* X86_BR_RET */
1343 	PERF_BR_SYSCALL,	/* X86_BR_SYSCALL */
1344 	PERF_BR_SYSRET,		/* X86_BR_SYSRET */
1345 	PERF_BR_UNKNOWN,	/* X86_BR_INT */
1346 	PERF_BR_UNKNOWN,	/* X86_BR_IRET */
1347 	PERF_BR_COND,		/* X86_BR_JCC */
1348 	PERF_BR_UNCOND,		/* X86_BR_JMP */
1349 	PERF_BR_UNKNOWN,	/* X86_BR_IRQ */
1350 	PERF_BR_IND_CALL,	/* X86_BR_IND_CALL */
1351 	PERF_BR_UNKNOWN,	/* X86_BR_ABORT */
1352 	PERF_BR_UNKNOWN,	/* X86_BR_IN_TX */
1353 	PERF_BR_UNKNOWN,	/* X86_BR_NO_TX */
1354 	PERF_BR_CALL,		/* X86_BR_ZERO_CALL */
1355 	PERF_BR_UNKNOWN,	/* X86_BR_CALL_STACK */
1356 	PERF_BR_IND,		/* X86_BR_IND_JMP */
1357 };
1358 
1359 static int
1360 common_branch_type(int type)
1361 {
1362 	int i;
1363 
1364 	type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */
1365 
1366 	if (type) {
1367 		i = __ffs(type);
1368 		if (i < X86_BR_TYPE_MAP_MAX)
1369 			return branch_map[i];
1370 	}
1371 
1372 	return PERF_BR_UNKNOWN;
1373 }
1374 
1375 enum {
1376 	ARCH_LBR_BR_TYPE_JCC			= 0,
1377 	ARCH_LBR_BR_TYPE_NEAR_IND_JMP		= 1,
1378 	ARCH_LBR_BR_TYPE_NEAR_REL_JMP		= 2,
1379 	ARCH_LBR_BR_TYPE_NEAR_IND_CALL		= 3,
1380 	ARCH_LBR_BR_TYPE_NEAR_REL_CALL		= 4,
1381 	ARCH_LBR_BR_TYPE_NEAR_RET		= 5,
1382 	ARCH_LBR_BR_TYPE_KNOWN_MAX		= ARCH_LBR_BR_TYPE_NEAR_RET,
1383 
1384 	ARCH_LBR_BR_TYPE_MAP_MAX		= 16,
1385 };
1386 
1387 static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
1388 	[ARCH_LBR_BR_TYPE_JCC]			= X86_BR_JCC,
1389 	[ARCH_LBR_BR_TYPE_NEAR_IND_JMP]		= X86_BR_IND_JMP,
1390 	[ARCH_LBR_BR_TYPE_NEAR_REL_JMP]		= X86_BR_JMP,
1391 	[ARCH_LBR_BR_TYPE_NEAR_IND_CALL]	= X86_BR_IND_CALL,
1392 	[ARCH_LBR_BR_TYPE_NEAR_REL_CALL]	= X86_BR_CALL,
1393 	[ARCH_LBR_BR_TYPE_NEAR_RET]		= X86_BR_RET,
1394 };
1395 
1396 /*
1397  * implement actual branch filter based on user demand.
1398  * Hardware may not exactly satisfy that request, thus
1399  * we need to inspect opcodes. Mismatched branches are
1400  * discarded. Therefore, the number of branches returned
1401  * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
1402  */
1403 static void
1404 intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
1405 {
1406 	u64 from, to;
1407 	int br_sel = cpuc->br_sel;
1408 	int i, j, type, to_plm;
1409 	bool compress = false;
1410 
1411 	/* if sampling all branches, then nothing to filter */
1412 	if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
1413 	    ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
1414 		return;
1415 
1416 	for (i = 0; i < cpuc->lbr_stack.nr; i++) {
1417 
1418 		from = cpuc->lbr_entries[i].from;
1419 		to = cpuc->lbr_entries[i].to;
1420 		type = cpuc->lbr_entries[i].type;
1421 
1422 		/*
1423 		 * Parse the branch type recorded in LBR_x_INFO MSR.
1424 		 * Doesn't support OTHER_BRANCH decoding for now.
1425 		 * The OTHER_BRANCH branch type still relies on software decoding.
1426 		 */
1427 		if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
1428 		    type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
1429 			to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1430 			type = arch_lbr_br_type_map[type] | to_plm;
1431 		} else
1432 			type = branch_type(from, to, cpuc->lbr_entries[i].abort);
1433 		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
1434 			if (cpuc->lbr_entries[i].in_tx)
1435 				type |= X86_BR_IN_TX;
1436 			else
1437 				type |= X86_BR_NO_TX;
1438 		}
1439 
1440 		/* if type does not correspond, then discard */
1441 		if (type == X86_BR_NONE || (br_sel & type) != type) {
1442 			cpuc->lbr_entries[i].from = 0;
1443 			compress = true;
1444 		}
1445 
1446 		if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
1447 			cpuc->lbr_entries[i].type = common_branch_type(type);
1448 	}
1449 
1450 	if (!compress)
1451 		return;
1452 
1453 	/* remove all entries with from=0 */
1454 	for (i = 0; i < cpuc->lbr_stack.nr; ) {
1455 		if (!cpuc->lbr_entries[i].from) {
1456 			j = i;
1457 			while (++j < cpuc->lbr_stack.nr)
1458 				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
1459 			cpuc->lbr_stack.nr--;
1460 			if (!cpuc->lbr_entries[i].from)
1461 				continue;
1462 		}
1463 		i++;
1464 	}
1465 }
1466 
1467 void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
1468 {
1469 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1470 
1471 	/* Cannot get TOS for large PEBS and Arch LBR */
1472 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
1473 	    (cpuc->n_pebs == cpuc->n_large_pebs))
1474 		cpuc->lbr_stack.hw_idx = -1ULL;
1475 	else
1476 		cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
1477 
1478 	intel_pmu_store_lbr(cpuc, lbr);
1479 	intel_pmu_lbr_filter(cpuc);
1480 }
1481 
1482 /*
1483  * Map interface branch filters onto LBR filters
1484  */
1485 static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1486 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1487 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1488 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1489 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1490 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
1491 						| LBR_IND_JMP | LBR_FAR,
1492 	/*
1493 	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
1494 	 */
1495 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
1496 	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
1497 	/*
1498 	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
1499 	 */
1500 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1501 	[PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
1502 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1503 };
1504 
1505 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1506 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1507 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1508 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1509 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1510 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
1511 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1512 						| LBR_FAR,
1513 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
1514 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
1515 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
1516 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
1517 };
1518 
1519 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1520 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1521 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1522 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1523 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1524 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
1525 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1526 						| LBR_FAR,
1527 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
1528 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
1529 	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1530 						| LBR_RETURN | LBR_CALL_STACK,
1531 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
1532 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
1533 };
1534 
1535 static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1536 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= ARCH_LBR_ANY,
1537 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= ARCH_LBR_USER,
1538 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= ARCH_LBR_KERNEL,
1539 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1540 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= ARCH_LBR_RETURN |
1541 						  ARCH_LBR_OTHER_BRANCH,
1542 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = ARCH_LBR_REL_CALL |
1543 						  ARCH_LBR_IND_CALL |
1544 						  ARCH_LBR_OTHER_BRANCH,
1545 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = ARCH_LBR_IND_CALL,
1546 	[PERF_SAMPLE_BRANCH_COND_SHIFT]         = ARCH_LBR_JCC,
1547 	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = ARCH_LBR_REL_CALL |
1548 						  ARCH_LBR_IND_CALL |
1549 						  ARCH_LBR_RETURN |
1550 						  ARCH_LBR_CALL_STACK,
1551 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= ARCH_LBR_IND_JMP,
1552 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= ARCH_LBR_REL_CALL,
1553 };
1554 
1555 /* core */
1556 void __init intel_pmu_lbr_init_core(void)
1557 {
1558 	x86_pmu.lbr_nr     = 4;
1559 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1560 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1561 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1562 
1563 	/*
1564 	 * SW branch filter usage:
1565 	 * - compensate for lack of HW filter
1566 	 */
1567 }
1568 
1569 /* nehalem/westmere */
1570 void __init intel_pmu_lbr_init_nhm(void)
1571 {
1572 	x86_pmu.lbr_nr     = 16;
1573 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1574 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1575 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1576 
1577 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1578 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1579 
1580 	/*
1581 	 * SW branch filter usage:
1582 	 * - workaround LBR_SEL errata (see above)
1583 	 * - support syscall, sysret capture.
1584 	 *   That requires LBR_FAR, but that means far
1585 	 *   jmps need to be filtered out
1586 	 */
1587 }
1588 
1589 /* sandy bridge */
1590 void __init intel_pmu_lbr_init_snb(void)
1591 {
1592 	x86_pmu.lbr_nr	 = 16;
1593 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1594 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1595 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1596 
1597 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1598 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1599 
1600 	/*
1601 	 * SW branch filter usage:
1602 	 * - support syscall, sysret capture.
1603 	 *   That requires LBR_FAR, but that means far
1604 	 *   jmps need to be filtered out
1605 	 */
1606 }
1607 
1608 static inline struct kmem_cache *
1609 create_lbr_kmem_cache(size_t size, size_t align)
1610 {
1611 	return kmem_cache_create("x86_lbr", size, align, 0, NULL);
1612 }
1613 
1614 /* haswell */
1615 void intel_pmu_lbr_init_hsw(void)
1616 {
1617 	size_t size = sizeof(struct x86_perf_task_context);
1618 
1619 	x86_pmu.lbr_nr	 = 16;
1620 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1621 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1622 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1623 
1624 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1625 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1626 
1627 	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1628 
1629 	if (lbr_from_signext_quirk_needed())
1630 		static_branch_enable(&lbr_from_quirk_key);
1631 }
1632 
1633 /* skylake */
1634 __init void intel_pmu_lbr_init_skl(void)
1635 {
1636 	size_t size = sizeof(struct x86_perf_task_context);
1637 
1638 	x86_pmu.lbr_nr	 = 32;
1639 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1640 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1641 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1642 	x86_pmu.lbr_info = MSR_LBR_INFO_0;
1643 
1644 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1645 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1646 
1647 	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1648 
1649 	/*
1650 	 * SW branch filter usage:
1651 	 * - support syscall, sysret capture.
1652 	 *   That requires LBR_FAR, but that means far
1653 	 *   jmps need to be filtered out
1654 	 */
1655 }
1656 
1657 /* atom */
1658 void __init intel_pmu_lbr_init_atom(void)
1659 {
1660 	/*
1661 	 * only models starting at stepping 10 seem
1662 	 * to have an operational LBR which can freeze
1663 	 * on PMU interrupt
1664 	 */
1665 	if (boot_cpu_data.x86_model == 28
1666 	    && boot_cpu_data.x86_stepping < 10) {
1667 		pr_cont("LBR disabled due to erratum");
1668 		return;
1669 	}
1670 
1671 	x86_pmu.lbr_nr	   = 8;
1672 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1673 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1674 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1675 
1676 	/*
1677 	 * SW branch filter usage:
1678 	 * - compensate for lack of HW filter
1679 	 */
1680 }
1681 
1682 /* slm */
1683 void __init intel_pmu_lbr_init_slm(void)
1684 {
1685 	x86_pmu.lbr_nr	   = 8;
1686 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1687 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1688 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1689 
1690 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1691 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1692 
1693 	/*
1694 	 * SW branch filter usage:
1695 	 * - compensate for lack of HW filter
1696 	 */
1697 	pr_cont("8-deep LBR, ");
1698 }
1699 
1700 /* Knights Landing */
1701 void intel_pmu_lbr_init_knl(void)
1702 {
1703 	x86_pmu.lbr_nr	   = 8;
1704 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1705 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1706 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1707 
1708 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1709 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1710 
1711 	/* Knights Landing does have MISPREDICT bit */
1712 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
1713 		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
1714 }
1715 
1716 /*
1717  * LBR state size is variable based on the max number of registers.
1718  * This calculates the expected state size, which should match
1719  * what the hardware enumerates for the size of XFEATURE_LBR.
1720  */
1721 static inline unsigned int get_lbr_state_size(void)
1722 {
1723 	return sizeof(struct arch_lbr_state) +
1724 	       x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1725 }
1726 
1727 static bool is_arch_lbr_xsave_available(void)
1728 {
1729 	if (!boot_cpu_has(X86_FEATURE_XSAVES))
1730 		return false;
1731 
1732 	/*
1733 	 * Check the LBR state with the corresponding software structure.
1734 	 * Disable LBR XSAVES support if the size doesn't match.
1735 	 */
1736 	if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
1737 		return false;
1738 
1739 	return true;
1740 }
1741 
1742 void __init intel_pmu_arch_lbr_init(void)
1743 {
1744 	struct pmu *pmu = x86_get_pmu(smp_processor_id());
1745 	union cpuid28_eax eax;
1746 	union cpuid28_ebx ebx;
1747 	union cpuid28_ecx ecx;
1748 	unsigned int unused_edx;
1749 	bool arch_lbr_xsave;
1750 	size_t size;
1751 	u64 lbr_nr;
1752 
1753 	/* Arch LBR Capabilities */
1754 	cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
1755 
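	/*
	 * Each set bit n in the depth mask advertises support for a depth of
	 * 8 * (n + 1) entries; pick the deepest supported configuration.
	 */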
1756 	lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
1757 	if (!lbr_nr)
1758 		goto clear_arch_lbr;
1759 
1760 	/* Apply the max depth of Arch LBR */
1761 	if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
1762 		goto clear_arch_lbr;
1763 
1764 	x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
1765 	x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
1766 	x86_pmu.lbr_lip = eax.split.lbr_lip;
1767 	x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
1768 	x86_pmu.lbr_filter = ebx.split.lbr_filter;
1769 	x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
1770 	x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
1771 	x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
1772 	x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
1773 	x86_pmu.lbr_nr = lbr_nr;
1774 
1775 
1776 	arch_lbr_xsave = is_arch_lbr_xsave_available();
1777 	if (arch_lbr_xsave) {
1778 		size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
1779 		       get_lbr_state_size();
1780 		pmu->task_ctx_cache = create_lbr_kmem_cache(size,
1781 							    XSAVE_ALIGNMENT);
1782 	}
1783 
1784 	if (!pmu->task_ctx_cache) {
1785 		arch_lbr_xsave = false;
1786 
1787 		size = sizeof(struct x86_perf_task_context_arch_lbr) +
1788 		       lbr_nr * sizeof(struct lbr_entry);
1789 		pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1790 	}
1791 
1792 	x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
1793 	x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
1794 	x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;
1795 
1796 	/* LBR callstack requires both CPL and Branch Filtering support */
1797 	if (!x86_pmu.lbr_cpl ||
1798 	    !x86_pmu.lbr_filter ||
1799 	    !x86_pmu.lbr_call_stack)
1800 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;
1801 
1802 	if (!x86_pmu.lbr_cpl) {
1803 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
1804 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
1805 	} else if (!x86_pmu.lbr_filter) {
1806 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
1807 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
1808 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
1809 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
1810 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
1811 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
1812 		arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
1813 	}
1814 
1815 	x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
1816 	x86_pmu.lbr_ctl_map  = arch_lbr_ctl_map;
1817 
1818 	if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
1819 		x86_pmu.lbr_ctl_map = NULL;
1820 
1821 	x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
1822 	if (arch_lbr_xsave) {
1823 		x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
1824 		x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
1825 		x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
1826 		pr_cont("XSAVE ");
1827 	} else {
1828 		x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
1829 		x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
1830 		x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
1831 	}
1832 
1833 	pr_cont("Architectural LBR, ");
1834 
1835 	return;
1836 
1837 clear_arch_lbr:
1838 	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
1839 }
1840 
1841 /**
1842  * x86_perf_get_lbr - get the LBR records information
1843  *
1844  * @lbr: the caller's memory to store the LBR records information
1845  *
1846  * Returns: 0 indicates the LBR info has been successfully obtained
1847  */
1848 int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
1849 {
1850 	int lbr_fmt = x86_pmu.intel_cap.lbr_format;
1851 
1852 	lbr->nr = x86_pmu.lbr_nr;
1853 	lbr->from = x86_pmu.lbr_from;
1854 	lbr->to = x86_pmu.lbr_to;
1855 	lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
1856 
1857 	return 0;
1858 }
1859 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
1860 
1861 struct event_constraint vlbr_constraint =
1862 	__EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
1863 			  FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
1864