xref: /openbmc/linux/arch/x86/events/intel/lbr.c (revision de2bdb3d)
1 #include <linux/perf_event.h>
2 #include <linux/types.h>
3 
4 #include <asm/perf_event.h>
5 #include <asm/msr.h>
6 #include <asm/insn.h>
7 
8 #include "../perf_event.h"
9 
10 enum {
11 	LBR_FORMAT_32		= 0x00,
12 	LBR_FORMAT_LIP		= 0x01,
13 	LBR_FORMAT_EIP		= 0x02,
14 	LBR_FORMAT_EIP_FLAGS	= 0x03,
15 	LBR_FORMAT_EIP_FLAGS2	= 0x04,
16 	LBR_FORMAT_INFO		= 0x05,
17 	LBR_FORMAT_TIME		= 0x06,
18 	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_TIME,
19 };
20 
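/*
 * lbr_desc[] records, for each LBR_FORMAT_* value, which extra flag bits
 * (mispredict and/or TSX state) the CPU encodes in the top bits of the
 * LBR FROM MSRs; intel_pmu_lbr_read_64() and the sign-extension quirk
 * below use it to decide which bits to decode and strip.
 */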
21 static enum {
22 	LBR_EIP_FLAGS		= 1,
23 	LBR_TSX			= 2,
24 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
25 	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
26 	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
27 };
28 
29 /*
30  * Intel LBR_SELECT bits
31  * Intel Vol3a, April 2011, Section 16.7 Table 16-10
32  *
33  * Hardware branch filter (not available on all CPUs)
34  */
35 #define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
36 #define LBR_USER_BIT		1 /* do not capture at ring > 0 */
37 #define LBR_JCC_BIT		2 /* do not capture conditional branches */
38 #define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
39 #define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
40 #define LBR_RETURN_BIT		5 /* do not capture near returns */
41 #define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
42 #define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
43 #define LBR_FAR_BIT		8 /* do not capture far branches */
44 #define LBR_CALL_STACK_BIT	9 /* enable call stack */
45 
46 /*
47  * Following bit only exists in Linux; we mask it out before writing it to
48  * the actual MSR. But it helps the perf constraint code understand
49  * that this is a separate configuration.
50  */
51 #define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */
52 
53 #define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
54 #define LBR_USER	(1 << LBR_USER_BIT)
55 #define LBR_JCC		(1 << LBR_JCC_BIT)
56 #define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
57 #define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
58 #define LBR_RETURN	(1 << LBR_RETURN_BIT)
59 #define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
60 #define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
61 #define LBR_FAR		(1 << LBR_FAR_BIT)
62 #define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
63 #define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)
64 
65 #define LBR_PLM (LBR_KERNEL | LBR_USER)
66 
67 #define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
68 #define LBR_NOT_SUPP	-1	/* LBR filter not supported */
69 #define LBR_IGN		0	/* ignored */
70 
71 #define LBR_ANY		 \
72 	(LBR_JCC	|\
73 	 LBR_REL_CALL	|\
74 	 LBR_IND_CALL	|\
75 	 LBR_RETURN	|\
76 	 LBR_REL_JMP	|\
77 	 LBR_IND_JMP	|\
78 	 LBR_FAR)
79 
80 #define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
81 #define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
82 #define LBR_FROM_FLAG_ABORT	BIT_ULL(61)
83 
84 #define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))
85 
86 /*
87  * x86 control flow change classification
88  * x86 control flow changes include branches, interrupts, traps, faults
89  */
90 enum {
91 	X86_BR_NONE		= 0,      /* unknown */
92 
93 	X86_BR_USER		= 1 << 0, /* branch target is user */
94 	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */
95 
96 	X86_BR_CALL		= 1 << 2, /* call */
97 	X86_BR_RET		= 1 << 3, /* return */
98 	X86_BR_SYSCALL		= 1 << 4, /* syscall */
99 	X86_BR_SYSRET		= 1 << 5, /* syscall return */
100 	X86_BR_INT		= 1 << 6, /* sw interrupt */
101 	X86_BR_IRET		= 1 << 7, /* return from interrupt */
102 	X86_BR_JCC		= 1 << 8, /* conditional */
103 	X86_BR_JMP		= 1 << 9, /* jump */
104 	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
105 	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
106 	X86_BR_ABORT		= 1 << 12,/* transaction abort */
107 	X86_BR_IN_TX		= 1 << 13,/* in transaction */
108 	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
109 	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
110 	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
111 	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
112 };
113 
114 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
115 #define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
116 
117 #define X86_BR_ANY       \
118 	(X86_BR_CALL    |\
119 	 X86_BR_RET     |\
120 	 X86_BR_SYSCALL |\
121 	 X86_BR_SYSRET  |\
122 	 X86_BR_INT     |\
123 	 X86_BR_IRET    |\
124 	 X86_BR_JCC     |\
125 	 X86_BR_JMP	 |\
126 	 X86_BR_IRQ	 |\
127 	 X86_BR_ABORT	 |\
128 	 X86_BR_IND_CALL |\
129 	 X86_BR_IND_JMP  |\
130 	 X86_BR_ZERO_CALL)
131 
132 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
133 
134 #define X86_BR_ANY_CALL		 \
135 	(X86_BR_CALL		|\
136 	 X86_BR_IND_CALL	|\
137 	 X86_BR_ZERO_CALL	|\
138 	 X86_BR_SYSCALL		|\
139 	 X86_BR_IRQ		|\
140 	 X86_BR_INT)
141 
142 static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
143 
144 /*
145  * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
146  * otherwise it becomes nearly impossible to get a reliable stack.
147  */
148 
149 static void __intel_pmu_lbr_enable(bool pmi)
150 {
151 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
152 	u64 debugctl, lbr_select = 0, orig_debugctl;
153 
154 	/*
155 	 * No need to unfreeze manually, as v4 can do that as part
156 	 * of the GLOBAL_STATUS ack.
157 	 */
158 	if (pmi && x86_pmu.version >= 4)
159 		return;
160 
161 	/*
162 	 * No need to reprogram LBR_SELECT in a PMI, as it
163 	 * did not change.
164 	 */
165 	if (cpuc->lbr_sel)
166 		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
167 	if (!pmi && cpuc->lbr_sel)
168 		wrmsrl(MSR_LBR_SELECT, lbr_select);
169 
170 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
171 	orig_debugctl = debugctl;
172 	debugctl |= DEBUGCTLMSR_LBR;
173 	/*
174 	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
175 	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
176 	 * may cause superfluous increase/decrease of LBR_TOS.
177 	 */
178 	if (!(lbr_select & LBR_CALL_STACK))
179 		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
180 	if (orig_debugctl != debugctl)
181 		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
182 }
183 
184 static void __intel_pmu_lbr_disable(void)
185 {
186 	u64 debugctl;
187 
188 	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
189 	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
190 	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
191 }
192 
193 static void intel_pmu_lbr_reset_32(void)
194 {
195 	int i;
196 
197 	for (i = 0; i < x86_pmu.lbr_nr; i++)
198 		wrmsrl(x86_pmu.lbr_from + i, 0);
199 }
200 
201 static void intel_pmu_lbr_reset_64(void)
202 {
203 	int i;
204 
205 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
206 		wrmsrl(x86_pmu.lbr_from + i, 0);
207 		wrmsrl(x86_pmu.lbr_to   + i, 0);
208 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
209 			wrmsrl(MSR_LBR_INFO_0 + i, 0);
210 	}
211 }
212 
213 void intel_pmu_lbr_reset(void)
214 {
215 	if (!x86_pmu.lbr_nr)
216 		return;
217 
218 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
219 		intel_pmu_lbr_reset_32();
220 	else
221 		intel_pmu_lbr_reset_64();
222 }
223 
224 /*
225  * TOS = most recently recorded branch
226  */
227 static inline u64 intel_pmu_lbr_tos(void)
228 {
229 	u64 tos;
230 
231 	rdmsrl(x86_pmu.lbr_tos, tos);
232 	return tos;
233 }
234 
235 enum {
236 	LBR_NONE,
237 	LBR_VALID,
238 };
239 
240 /*
241  * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
242  * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
243  * TSX is not supported they have no consistent behavior:
244  *
245  *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
246  *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
247  *     part of the sign extension.
248  *
249  * Therefore, if:
250  *
251  *   1) LBR has TSX format
252  *   2) CPU has no TSX support enabled
253  *
254  * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
255  * value from rdmsr() must be converted to have a 61-bit sign extension,
256  * ignoring the TSX flags.
257  */
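
/*
 * Illustrative example (values chosen only for demonstration): with TSX
 * disabled, hardware may log a kernel branch source as
 *
 *	0x1fffffffa0000000	(bits 63:61 clear, bits 60:59 set)
 *
 * Writing that value back verbatim would break the wrmsr() sign-extension
 * rule above, so lbr_from_signext_quirk_wr() below copies bits 60:59 into
 * bits 62:61, yielding 0x7fffffffa0000000; lbr_from_signext_quirk_rd()
 * undoes this on the read side by clearing bits 62:61.
 */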
258 static inline bool lbr_from_signext_quirk_needed(void)
259 {
260 	int lbr_format = x86_pmu.intel_cap.lbr_format;
261 	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
262 			   boot_cpu_has(X86_FEATURE_RTM);
263 
264 	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
265 }
266 
267 DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
268 
269 /* If quirk is enabled, ensure sign extension is 63 bits: */
270 inline u64 lbr_from_signext_quirk_wr(u64 val)
271 {
272 	if (static_branch_unlikely(&lbr_from_quirk_key)) {
273 		/*
274 		 * Sign extend into bits 61:62 while preserving bit 63.
275 		 *
276 		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
277 		 * in val are always OFF and must be changed to be sign
278 		 * extension bits. Since bits 59:60 are guaranteed to be
279 		 * part of the sign extension bits, we can just copy them
280 		 * to 61:62.
281 		 */
282 		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
283 	}
284 	return val;
285 }
286 
287 /*
288  * If quirk is needed, ensure sign extension is 61 bits:
289  */
290 u64 lbr_from_signext_quirk_rd(u64 val)
291 {
292 	if (static_branch_unlikely(&lbr_from_quirk_key)) {
293 		/*
294 		 * Quirk is on when TSX is not enabled. Therefore TSX
295 		 * flags must be read as OFF.
296 		 */
297 		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
298 	}
299 	return val;
300 }
301 
302 static inline void wrlbr_from(unsigned int idx, u64 val)
303 {
304 	val = lbr_from_signext_quirk_wr(val);
305 	wrmsrl(x86_pmu.lbr_from + idx, val);
306 }
307 
308 static inline void wrlbr_to(unsigned int idx, u64 val)
309 {
310 	wrmsrl(x86_pmu.lbr_to + idx, val);
311 }
312 
313 static inline u64 rdlbr_from(unsigned int idx)
314 {
315 	u64 val;
316 
317 	rdmsrl(x86_pmu.lbr_from + idx, val);
318 
319 	return lbr_from_signext_quirk_rd(val);
320 }
321 
322 static inline u64 rdlbr_to(unsigned int idx)
323 {
324 	u64 val;
325 
326 	rdmsrl(x86_pmu.lbr_to + idx, val);
327 
328 	return val;
329 }
330 
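/*
 * Save/restore of the LBR stack around context switches: when the LBR
 * call-stack feature is in use, the entries recorded for the outgoing
 * task are copied into its task_ctx and written back to the MSRs when
 * the task is scheduled back in.  Entries are addressed relative to the
 * TOS pointer, modulo the stack depth (lbr_nr).
 */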
331 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
332 {
333 	int i;
334 	unsigned lbr_idx, mask;
335 	u64 tos;
336 
337 	if (task_ctx->lbr_callstack_users == 0 ||
338 	    task_ctx->lbr_stack_state == LBR_NONE) {
339 		intel_pmu_lbr_reset();
340 		return;
341 	}
342 
343 	mask = x86_pmu.lbr_nr - 1;
344 	tos = task_ctx->tos;
345 	for (i = 0; i < tos; i++) {
346 		lbr_idx = (tos - i) & mask;
347 		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
348 		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
349 
350 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
351 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
352 	}
353 	wrmsrl(x86_pmu.lbr_tos, tos);
354 	task_ctx->lbr_stack_state = LBR_NONE;
355 }
356 
357 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
358 {
359 	unsigned lbr_idx, mask;
360 	u64 tos;
361 	int i;
362 
363 	if (task_ctx->lbr_callstack_users == 0) {
364 		task_ctx->lbr_stack_state = LBR_NONE;
365 		return;
366 	}
367 
368 	mask = x86_pmu.lbr_nr - 1;
369 	tos = intel_pmu_lbr_tos();
370 	for (i = 0; i < tos; i++) {
371 		lbr_idx = (tos - i) & mask;
372 		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
373 		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
374 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
375 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
376 	}
377 	task_ctx->tos = tos;
378 	task_ctx->lbr_stack_state = LBR_VALID;
379 }
380 
381 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
382 {
383 	struct x86_perf_task_context *task_ctx;
384 
385 	/*
386 	 * If LBR callstack feature is enabled and the stack was saved when
387 	 * the task was scheduled out, restore the stack. Otherwise flush
388 	 * the LBR stack.
389 	 */
390 	task_ctx = ctx ? ctx->task_ctx_data : NULL;
391 	if (task_ctx) {
392 		if (sched_in)
393 			__intel_pmu_lbr_restore(task_ctx);
394 		else
395 			__intel_pmu_lbr_save(task_ctx);
396 		return;
397 	}
398 
399 	/*
400 	 * Since a context switch can flip the address space and LBR entries
401 	 * are not tagged with an identifier, we need to wipe the LBR, even for
402 	 * per-cpu events. You simply cannot resolve the branches from the old
403 	 * address space.
404 	 */
405 	if (sched_in)
406 		intel_pmu_lbr_reset();
407 }
408 
409 static inline bool branch_user_callstack(unsigned br_sel)
410 {
411 	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
412 }
413 
414 void intel_pmu_lbr_add(struct perf_event *event)
415 {
416 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
417 	struct x86_perf_task_context *task_ctx;
418 
419 	if (!x86_pmu.lbr_nr)
420 		return;
421 
422 	cpuc->br_sel = event->hw.branch_reg.reg;
423 
424 	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
425 		task_ctx = event->ctx->task_ctx_data;
426 		task_ctx->lbr_callstack_users++;
427 	}
428 
429 	/*
430 	 * Request pmu::sched_task() callback, which will fire inside the
431 	 * regular perf event scheduling, so that call will:
432 	 *
433 	 *  - restore or wipe; when LBR-callstack,
434 	 *  - wipe; otherwise,
435 	 *
436 	 * when this is from __perf_event_task_sched_in().
437 	 *
438 	 * However, if this is from perf_install_in_context(), no such callback
439 	 * will follow and we'll need to reset the LBR here if this is the
440 	 * first LBR event.
441 	 *
442 	 * The problem is, we cannot tell these cases apart... but we can
443 	 * exclude the biggest chunk of cases by looking at
444 	 * event->total_time_running. An event that has accrued runtime cannot
445 	 * be 'new'. Conversely, a new event can get installed through the
446 	 * context switch path for the first time.
447 	 */
448 	perf_sched_cb_inc(event->ctx->pmu);
449 	if (!cpuc->lbr_users++ && !event->total_time_running)
450 		intel_pmu_lbr_reset();
451 }
452 
453 void intel_pmu_lbr_del(struct perf_event *event)
454 {
455 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
456 	struct x86_perf_task_context *task_ctx;
457 
458 	if (!x86_pmu.lbr_nr)
459 		return;
460 
461 	if (branch_user_callstack(cpuc->br_sel) &&
462 	    event->ctx->task_ctx_data) {
463 		task_ctx = event->ctx->task_ctx_data;
464 		task_ctx->lbr_callstack_users--;
465 	}
466 
467 	cpuc->lbr_users--;
468 	WARN_ON_ONCE(cpuc->lbr_users < 0);
469 	perf_sched_cb_dec(event->ctx->pmu);
470 }
471 
472 void intel_pmu_lbr_enable_all(bool pmi)
473 {
474 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
475 
476 	if (cpuc->lbr_users)
477 		__intel_pmu_lbr_enable(pmi);
478 }
479 
480 void intel_pmu_lbr_disable_all(void)
481 {
482 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
483 
484 	if (cpuc->lbr_users)
485 		__intel_pmu_lbr_disable();
486 }
487 
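/*
 * In the 32-bit LBR format a single MSR carries both addresses: the FROM
 * address in the low 32 bits and the TO address in the high 32 bits, so
 * one rdmsrl() per entry fills both halves of the union below.
 */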
488 static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
489 {
490 	unsigned long mask = x86_pmu.lbr_nr - 1;
491 	u64 tos = intel_pmu_lbr_tos();
492 	int i;
493 
494 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
495 		unsigned long lbr_idx = (tos - i) & mask;
496 		union {
497 			struct {
498 				u32 from;
499 				u32 to;
500 			};
501 			u64     lbr;
502 		} msr_lastbranch;
503 
504 		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
505 
506 		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
507 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
508 		cpuc->lbr_entries[i].mispred	= 0;
509 		cpuc->lbr_entries[i].predicted	= 0;
510 		cpuc->lbr_entries[i].reserved	= 0;
511 	}
512 	cpuc->lbr_stack.nr = i;
513 }
514 
515 /*
516  * Due to the lack of segmentation in Linux, the effective address (offset)
517  * is the same as the linear address, allowing us to merge the LIP and EIP
518  * LBR formats.
519  */
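/*
 * Depending on the LBR format, the mispredict, TSX and cycle-count bits
 * are either read from the separate MSR_LBR_INFO_* registers
 * (LBR_FORMAT_INFO) or unpacked from the top bits of the FROM/TO values,
 * which are then restored to canonical addresses by sign extension.
 */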
520 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
521 {
522 	bool need_info = false;
523 	unsigned long mask = x86_pmu.lbr_nr - 1;
524 	int lbr_format = x86_pmu.intel_cap.lbr_format;
525 	u64 tos = intel_pmu_lbr_tos();
526 	int i;
527 	int out = 0;
528 	int num = x86_pmu.lbr_nr;
529 
530 	if (cpuc->lbr_sel) {
531 		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
532 		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
533 			num = tos;
534 	}
535 
536 	for (i = 0; i < num; i++) {
537 		unsigned long lbr_idx = (tos - i) & mask;
538 		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
539 		int skip = 0;
540 		u16 cycles = 0;
541 		int lbr_flags = lbr_desc[lbr_format];
542 
543 		from = rdlbr_from(lbr_idx);
544 		to   = rdlbr_to(lbr_idx);
545 
546 		if (lbr_format == LBR_FORMAT_INFO && need_info) {
547 			u64 info;
548 
549 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
550 			mis = !!(info & LBR_INFO_MISPRED);
551 			pred = !mis;
552 			in_tx = !!(info & LBR_INFO_IN_TX);
553 			abort = !!(info & LBR_INFO_ABORT);
554 			cycles = (info & LBR_INFO_CYCLES);
555 		}
556 
557 		if (lbr_format == LBR_FORMAT_TIME) {
558 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
559 			pred = !mis;
560 			skip = 1;
561 			cycles = ((to >> 48) & LBR_INFO_CYCLES);
562 
563 			to = (u64)((((s64)to) << 16) >> 16);
564 		}
565 
566 		if (lbr_flags & LBR_EIP_FLAGS) {
567 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
568 			pred = !mis;
569 			skip = 1;
570 		}
571 		if (lbr_flags & LBR_TSX) {
572 			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
573 			abort = !!(from & LBR_FROM_FLAG_ABORT);
574 			skip = 3;
575 		}
576 		from = (u64)((((s64)from) << skip) >> skip);
577 
578 		/*
579 		 * Some CPUs report duplicated abort records,
580 		 * with the second entry not having an abort bit set.
581 		 * Skip them here. This loop runs backwards,
582 		 * so we need to undo the previous record.
583 		 * If the abort just happened outside the window
584 		 * the extra entry cannot be removed.
585 		 */
586 		if (abort && x86_pmu.lbr_double_abort && out > 0)
587 			out--;
588 
589 		cpuc->lbr_entries[out].from	 = from;
590 		cpuc->lbr_entries[out].to	 = to;
591 		cpuc->lbr_entries[out].mispred	 = mis;
592 		cpuc->lbr_entries[out].predicted = pred;
593 		cpuc->lbr_entries[out].in_tx	 = in_tx;
594 		cpuc->lbr_entries[out].abort	 = abort;
595 		cpuc->lbr_entries[out].cycles	 = cycles;
596 		cpuc->lbr_entries[out].reserved	 = 0;
597 		out++;
598 	}
599 	cpuc->lbr_stack.nr = out;
600 }
601 
602 void intel_pmu_lbr_read(void)
603 {
604 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
605 
606 	if (!cpuc->lbr_users)
607 		return;
608 
609 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
610 		intel_pmu_lbr_read_32(cpuc);
611 	else
612 		intel_pmu_lbr_read_64(cpuc);
613 
614 	intel_pmu_lbr_filter(cpuc);
615 }
616 
617 /*
618  * SW filter is used:
619  * - in case there is no HW filter
620  * - in case the HW filter has errata or limitations
621  */
622 static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
623 {
624 	u64 br_type = event->attr.branch_sample_type;
625 	int mask = 0;
626 
627 	if (br_type & PERF_SAMPLE_BRANCH_USER)
628 		mask |= X86_BR_USER;
629 
630 	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
631 		mask |= X86_BR_KERNEL;
632 
633 	/* we ignore BRANCH_HV here */
634 
635 	if (br_type & PERF_SAMPLE_BRANCH_ANY)
636 		mask |= X86_BR_ANY;
637 
638 	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
639 		mask |= X86_BR_ANY_CALL;
640 
641 	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
642 		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
643 
644 	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
645 		mask |= X86_BR_IND_CALL;
646 
647 	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
648 		mask |= X86_BR_ABORT;
649 
650 	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
651 		mask |= X86_BR_IN_TX;
652 
653 	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
654 		mask |= X86_BR_NO_TX;
655 
656 	if (br_type & PERF_SAMPLE_BRANCH_COND)
657 		mask |= X86_BR_JCC;
658 
659 	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
660 		if (!x86_pmu_has_lbr_callstack())
661 			return -EOPNOTSUPP;
662 		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
663 			return -EINVAL;
664 		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
665 			X86_BR_CALL_STACK;
666 	}
667 
668 	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
669 		mask |= X86_BR_IND_JMP;
670 
671 	if (br_type & PERF_SAMPLE_BRANCH_CALL)
672 		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
673 	/*
674 	 * Stash the actual user request into reg; it may
675 	 * be used by fixup code for some CPUs.
676 	 */
677 	event->hw.branch_reg.reg = mask;
678 	return 0;
679 }
680 
681 /*
682  * Set up the HW LBR filter.
683  * Used only when available; it may not be enough to disambiguate
684  * all branches and may need the help of the SW filter.
685  */
686 static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
687 {
688 	struct hw_perf_event_extra *reg;
689 	u64 br_type = event->attr.branch_sample_type;
690 	u64 mask = 0, v;
691 	int i;
692 
693 	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
694 		if (!(br_type & (1ULL << i)))
695 			continue;
696 
697 		v = x86_pmu.lbr_sel_map[i];
698 		if (v == LBR_NOT_SUPP)
699 			return -EOPNOTSUPP;
700 
701 		if (v != LBR_IGN)
702 			mask |= v;
703 	}
704 
705 	reg = &event->hw.branch_reg;
706 	reg->idx = EXTRA_REG_LBR;
707 
708 	/*
709 	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
710 	 * in suppress mode. So LBR_SELECT should be set to
711 	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
712 	 * But the 10th bit LBR_CALL_STACK does not operate
713 	 * in suppress mode.
714 	 */
715 	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
716 
717 	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
718 	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
719 	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
720 		reg->config |= LBR_NO_INFO;
721 
722 	return 0;
723 }
724 
725 int intel_pmu_setup_lbr_filter(struct perf_event *event)
726 {
727 	int ret = 0;
728 
729 	/*
730 	 * no LBR on this PMU
731 	 */
732 	if (!x86_pmu.lbr_nr)
733 		return -EOPNOTSUPP;
734 
735 	/*
736 	 * setup SW LBR filter
737 	 */
738 	ret = intel_pmu_setup_sw_lbr_filter(event);
739 	if (ret)
740 		return ret;
741 
742 	/*
743 	 * setup HW LBR filter, if any
744 	 */
745 	if (x86_pmu.lbr_sel_map)
746 		ret = intel_pmu_setup_hw_lbr_filter(event);
747 
748 	return ret;
749 }
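
/*
 * Not part of this file -- purely an illustrative sketch of the userspace
 * side that ends up in this filter-setup path: a perf_event_attr asking
 * for user-level LBR call-stack sampling (the field values are an
 * assumption about a typical caller, not something mandated here):
 *
 *	struct perf_event_attr attr = {
 *		.type			= PERF_TYPE_HARDWARE,
 *		.config			= PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_period		= 100000,
 *		.sample_type		= PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
 *		.branch_sample_type	= PERF_SAMPLE_BRANCH_USER |
 *					  PERF_SAMPLE_BRANCH_CALL_STACK,
 *	};
 *
 * intel_pmu_setup_sw_lbr_filter() then adds the CALL/IND_CALL/RET bits
 * needed for call-stack mode and, on CPUs with a sel_map,
 * intel_pmu_setup_hw_lbr_filter() translates the request into LBR_SELECT.
 */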
750 
751 /*
752  * Return the type of control flow change at address "from".
753  * The instruction is not necessarily a branch (in case of interrupt).
754  *
755  * The branch type returned also includes the priv level of the
756  * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
757  *
758  * If a branch type is unknown OR the instruction cannot be
759  * decoded (e.g., text page not present), then X86_BR_NONE is
760  * returned.
761  */
762 static int branch_type(unsigned long from, unsigned long to, int abort)
763 {
764 	struct insn insn;
765 	void *addr;
766 	int bytes_read, bytes_left;
767 	int ret = X86_BR_NONE;
768 	int ext, to_plm, from_plm;
769 	u8 buf[MAX_INSN_SIZE];
770 	int is64 = 0;
771 
772 	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
773 	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
774 
775 	/*
776 	 * may be zero if the LBR did not fill up after a reset by the time
777 	 * we get a PMU interrupt
778 	 */
779 	if (from == 0 || to == 0)
780 		return X86_BR_NONE;
781 
782 	if (abort)
783 		return X86_BR_ABORT | to_plm;
784 
785 	if (from_plm == X86_BR_USER) {
786 		/*
787 		 * can happen if measuring at the user level only
788 		 * and we interrupt in a kernel thread, e.g., idle.
789 		 */
790 		if (!current->mm)
791 			return X86_BR_NONE;
792 
793 		/* may fail if text not present */
794 		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
795 						MAX_INSN_SIZE);
796 		bytes_read = MAX_INSN_SIZE - bytes_left;
797 		if (!bytes_read)
798 			return X86_BR_NONE;
799 
800 		addr = buf;
801 	} else {
802 		/*
803 		 * The LBR logs any address in the IP, even if the IP just
804 		 * faulted. This means userspace can control the from address.
805 		 * Ensure we don't blindly read any address by validating it is
806 		 * a known text address.
807 		 */
808 		if (kernel_text_address(from)) {
809 			addr = (void *)from;
810 			/*
811 			 * Assume we can get the maximum possible size
812 			 * when grabbing kernel data.  This is not
813 			 * _strictly_ true since we could possibly be
814 			 * executing up next to a memory hole, but
815 			 * it is very unlikely to be a problem.
816 			 */
817 			bytes_read = MAX_INSN_SIZE;
818 		} else {
819 			return X86_BR_NONE;
820 		}
821 	}
822 
823 	/*
824 	 * decoder needs to know the ABI especially
825 	 * on 64-bit systems running 32-bit apps
826 	 */
827 #ifdef CONFIG_X86_64
828 	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
829 #endif
830 	insn_init(&insn, addr, bytes_read, is64);
831 	insn_get_opcode(&insn);
832 	if (!insn.opcode.got)
833 		return X86_BR_ABORT;
834 
835 	switch (insn.opcode.bytes[0]) {
836 	case 0xf:
837 		switch (insn.opcode.bytes[1]) {
838 		case 0x05: /* syscall */
839 		case 0x34: /* sysenter */
840 			ret = X86_BR_SYSCALL;
841 			break;
842 		case 0x07: /* sysret */
843 		case 0x35: /* sysexit */
844 			ret = X86_BR_SYSRET;
845 			break;
846 		case 0x80 ... 0x8f: /* conditional */
847 			ret = X86_BR_JCC;
848 			break;
849 		default:
850 			ret = X86_BR_NONE;
851 		}
852 		break;
853 	case 0x70 ... 0x7f: /* conditional */
854 		ret = X86_BR_JCC;
855 		break;
856 	case 0xc2: /* near ret */
857 	case 0xc3: /* near ret */
858 	case 0xca: /* far ret */
859 	case 0xcb: /* far ret */
860 		ret = X86_BR_RET;
861 		break;
862 	case 0xcf: /* iret */
863 		ret = X86_BR_IRET;
864 		break;
865 	case 0xcc ... 0xce: /* int */
866 		ret = X86_BR_INT;
867 		break;
868 	case 0xe8: /* call near rel */
869 		insn_get_immediate(&insn);
870 		if (insn.immediate1.value == 0) {
871 			/* zero length call */
872 			ret = X86_BR_ZERO_CALL;
873 			break;
874 		}
875 	case 0x9a: /* call far absolute */
876 		ret = X86_BR_CALL;
877 		break;
878 	case 0xe0 ... 0xe3: /* loop jmp */
879 		ret = X86_BR_JCC;
880 		break;
881 	case 0xe9 ... 0xeb: /* jmp */
882 		ret = X86_BR_JMP;
883 		break;
884 	case 0xff: /* call near absolute, call far absolute ind */
885 		insn_get_modrm(&insn);
886 		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
887 		switch (ext) {
888 		case 2: /* near ind call */
889 		case 3: /* far ind call */
890 			ret = X86_BR_IND_CALL;
891 			break;
892 		case 4:
893 		case 5:
894 			ret = X86_BR_IND_JMP;
895 			break;
896 		}
897 		break;
898 	default:
899 		ret = X86_BR_NONE;
900 	}
901 	/*
902 	 * interrupts, traps, faults (and thus ring transition) may
903 	 * occur on any instruction. Thus, to classify them correctly,
904 	 * we need to first look at the from and to priv levels. If they
905 	 * are different and to is in the kernel, then it indicates
906 	 * a ring transition. If the from instruction is not a ring
907 	 * transition instr (syscall, sysenter, int), then it means
908 	 * it was an irq, trap or fault.
909 	 *
910 	 * we have no way of detecting kernel to kernel faults.
911 	 */
912 	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
913 	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
914 		ret = X86_BR_IRQ;
915 
916 	/*
917 	 * branch priv level determined by target as
918 	 * is done by HW when LBR_SELECT is implemented
919 	 */
920 	if (ret != X86_BR_NONE)
921 		ret |= to_plm;
922 
923 	return ret;
924 }
925 
926 /*
927  * implement actual branch filter based on user demand.
928  * Hardware may not exactly satisfy that request, thus
929  * we need to inspect opcodes. Mismatched branches are
930  * discarded. Therefore, the number of branches returned
931  * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
932  */
933 static void
934 intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
935 {
936 	u64 from, to;
937 	int br_sel = cpuc->br_sel;
938 	int i, j, type;
939 	bool compress = false;
940 
941 	/* if sampling all branches, then nothing to filter */
942 	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
943 		return;
944 
945 	for (i = 0; i < cpuc->lbr_stack.nr; i++) {
946 
947 		from = cpuc->lbr_entries[i].from;
948 		to = cpuc->lbr_entries[i].to;
949 
950 		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
951 		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
952 			if (cpuc->lbr_entries[i].in_tx)
953 				type |= X86_BR_IN_TX;
954 			else
955 				type |= X86_BR_NO_TX;
956 		}
957 
958 		/* if type does not correspond, then discard */
959 		if (type == X86_BR_NONE || (br_sel & type) != type) {
960 			cpuc->lbr_entries[i].from = 0;
961 			compress = true;
962 		}
963 	}
964 
965 	if (!compress)
966 		return;
967 
968 	/* remove all entries with from=0 */
969 	for (i = 0; i < cpuc->lbr_stack.nr; ) {
970 		if (!cpuc->lbr_entries[i].from) {
971 			j = i;
972 			while (++j < cpuc->lbr_stack.nr)
973 				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
974 			cpuc->lbr_stack.nr--;
975 			if (!cpuc->lbr_entries[i].from)
976 				continue;
977 		}
978 		i++;
979 	}
980 }
981 
982 /*
983  * Map interface branch filters onto LBR filters
984  */
985 static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
986 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
987 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
988 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
989 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
990 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
991 						| LBR_IND_JMP | LBR_FAR,
992 	/*
993 	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
994 	 */
995 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
996 	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
997 	/*
998 	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
999 	 */
1000 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1001 	[PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
1002 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1003 };
1004 
1005 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1006 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1007 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1008 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1009 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1010 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
1011 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1012 						| LBR_FAR,
1013 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
1014 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
1015 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
1016 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
1017 };
1018 
1019 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1020 	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
1021 	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
1022 	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
1023 	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
1024 	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
1025 	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1026 						| LBR_FAR,
1027 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
1028 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
1029 	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
1030 						| LBR_RETURN | LBR_CALL_STACK,
1031 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
1032 	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
1033 };
1034 
1035 /* core */
1036 void __init intel_pmu_lbr_init_core(void)
1037 {
1038 	x86_pmu.lbr_nr     = 4;
1039 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1040 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1041 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1042 
1043 	/*
1044 	 * SW branch filter usage:
1045 	 * - compensate for lack of HW filter
1046 	 */
1047 }
1048 
1049 /* nehalem/westmere */
1050 void __init intel_pmu_lbr_init_nhm(void)
1051 {
1052 	x86_pmu.lbr_nr     = 16;
1053 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1054 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1055 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1056 
1057 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1058 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1059 
1060 	/*
1061 	 * SW branch filter usage:
1062 	 * - workaround LBR_SEL errata (see above)
1063 	 * - support syscall, sysret capture.
1064 	 *   That requires LBR_FAR but that means far
1065 	 *   jmps need to be filtered out
1066 	 */
1067 }
1068 
1069 /* sandy bridge */
1070 void __init intel_pmu_lbr_init_snb(void)
1071 {
1072 	x86_pmu.lbr_nr	 = 16;
1073 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1074 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1075 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1076 
1077 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1078 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1079 
1080 	/*
1081 	 * SW branch filter usage:
1082 	 * - support syscall, sysret capture.
1083 	 *   That requires LBR_FAR but that means far
1084 	 *   jmps need to be filtered out
1085 	 */
1086 }
1087 
1088 /* haswell */
1089 void intel_pmu_lbr_init_hsw(void)
1090 {
1091 	x86_pmu.lbr_nr	 = 16;
1092 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1093 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1094 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1095 
1096 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1097 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1098 
1099 	if (lbr_from_signext_quirk_needed())
1100 		static_branch_enable(&lbr_from_quirk_key);
1101 }
1102 
1103 /* skylake */
1104 __init void intel_pmu_lbr_init_skl(void)
1105 {
1106 	x86_pmu.lbr_nr	 = 32;
1107 	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
1108 	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1109 	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1110 
1111 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1112 	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1113 
1114 	/*
1115 	 * SW branch filter usage:
1116 	 * - support syscall, sysret capture.
1117 	 *   That requires LBR_FAR but that means far
1118 	 *   jmps need to be filtered out
1119 	 */
1120 }
1121 
1122 /* atom */
1123 void __init intel_pmu_lbr_init_atom(void)
1124 {
1125 	/*
1126 	 * only models starting at stepping 10 seem
1127 	 * to have an operational LBR which can freeze
1128 	 * on PMU interrupt
1129 	 */
1130 	if (boot_cpu_data.x86_model == 28
1131 	    && boot_cpu_data.x86_mask < 10) {
1132 		pr_cont("LBR disabled due to erratum");
1133 		return;
1134 	}
1135 
1136 	x86_pmu.lbr_nr	   = 8;
1137 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1138 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1139 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1140 
1141 	/*
1142 	 * SW branch filter usage:
1143 	 * - compensate for lack of HW filter
1144 	 */
1145 }
1146 
1147 /* slm */
1148 void __init intel_pmu_lbr_init_slm(void)
1149 {
1150 	x86_pmu.lbr_nr	   = 8;
1151 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1152 	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1153 	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1154 
1155 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1156 	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1157 
1158 	/*
1159 	 * SW branch filter usage:
1160 	 * - compensate for lack of HW filter
1161 	 */
1162 	pr_cont("8-deep LBR, ");
1163 }
1164 
1165 /* Knights Landing */
1166 void intel_pmu_lbr_init_knl(void)
1167 {
1168 	x86_pmu.lbr_nr	   = 8;
1169 	x86_pmu.lbr_tos    = MSR_LBR_TOS;
1170 	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1171 	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1172 
1173 	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1174 	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1175 }
1176