xref: /openbmc/linux/arch/s390/kernel/perf_event.c (revision 089a49b6)
/*
 * Performance event support for s390x
 *
 *  Copyright IBM Corp. 2012
 *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT	"perf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>

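/*
 * Return the name of the PMU: the CPU-measurement facilities (CPUMF) if
 * either the counter or the sampling facility is available, otherwise a
 * generic "pmu" placeholder.
 */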
const char *perf_pmu_name(void)
{
	if (cpum_cf_avail() || cpum_sf_avail())
		return "CPU-measurement facilities (CPUMF)";
	return "pmu";
}
EXPORT_SYMBOL(perf_pmu_name);

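/*
 * Return the number of counters provided by the CPU-measurement counter
 * facility, or zero if the facility is not available.
 */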
int perf_num_counters(void)
{
	int num = 0;

	if (cpum_cf_avail())
		num += PERF_CPUM_CF_MAX_CTR;

	return num;
}
EXPORT_SYMBOL(perf_num_counters);

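/*
 * The SIE entry code saves a pointer to the SIE control block of the
 * currently running guest in an otherwise unused slot (empty1[0]) of its
 * stack frame.  Fetch that pointer from the stack frame addressed by %r15.
 */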
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];

	if (!stack)
		return NULL;

	return (struct kvm_s390_sie_block *) stack->empty1[0];
}

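/*
 * Kernel-mode samples whose instruction address equals the sie_exit label
 * are attributed to a KVM guest: sie_exit is where control resumes when an
 * interrupt hits while the CPU is executing a guest under SIE.
 */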
static bool is_in_guest(struct pt_regs *regs)
{
	if (user_mode(regs))
		return false;
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
	return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
	return false;
#endif
}

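/* The guest was in user mode if the problem-state bit is set in its PSW. */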
static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}

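/* Instruction address the guest was executing, taken from the guest PSW. */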
static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
}

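/*
 * Report the guest instruction address for samples that hit while a KVM
 * guest was running, and the host instruction address otherwise.
 */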
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	return is_in_guest(regs) ? instruction_pointer_guest(regs)
				 : instruction_pointer(regs);
}

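/* Map the guest PSW state to the corresponding PERF_RECORD_MISC_GUEST_* flag. */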
static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
}

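/*
 * Classify a sample as guest user/kernel or host user/kernel so that the
 * perf core can tag the recorded event accordingly.
 */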
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	if (is_in_guest(regs))
		return perf_misc_guest_flags(regs);

	return user_mode(regs) ? PERF_RECORD_MISC_USER
			       : PERF_RECORD_MISC_KERNEL;
}

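/*
 * Dump the state of the CPU-measurement counter facility on the current
 * CPU: the counter version numbers and the authorization, enable, and
 * activation control masks reported by qctri().
 */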
void perf_event_print_debug(void)
{
	struct cpumf_ctr_info cf_info;
	unsigned long flags;
	int cpu;

	if (!cpum_cf_avail())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	memset(&cf_info, 0, sizeof(cf_info));
	if (!qctri(&cf_info))
		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
			cpu, cf_info.cfvn, cf_info.csvn,
			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);

	local_irq_restore(flags);
}

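/*
 * Walk one kernel stack within [low, high): record the saved return
 * address of each frame and follow the back chain to the caller's frame.
 * A zero back chain may mark an interrupt frame (pt_regs saved on the
 * stack); in that case the walk continues with the interrupted context's
 * stack pointer.  Returns the stack pointer at which the walk stopped.
 */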
/* See also arch/s390/kernel/traps.c */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
				   unsigned long sp,
				   unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			perf_callchain_store(entry,
					     sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

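/*
 * Record a kernel callchain: start at the current stack frame, walk the
 * async (interrupt) stack first, and then continue on the task stack of
 * the interrupted context.
 */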
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long head;
	struct stack_frame *head_sf;

	if (user_mode(regs))
		return;

	head = regs->gprs[15];
	head_sf = (struct stack_frame *) head;

	if (!head_sf || !head_sf->back_chain)
		return;

	head = head_sf->back_chain;
	head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
			     S390_lowcore.async_stack);

	__store_trace(entry, head, S390_lowcore.thread_info,
		      S390_lowcore.thread_info + THREAD_SIZE);
}
175