xref: /openbmc/linux/arch/sparc/kernel/pcr.c (revision 1c2f61d4)
/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_counter support layer.
 */

#define PCR_SUN4U_ENABLE	(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
#define PCR_N2_ENABLE		(PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
				 PCR_N2_TOE_OV1 | \
				 (2 << PCR_N2_SL1_SHIFT) | \
				 (0xff << PCR_N2_MASK1_SHIFT))
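
/* Note (added commentary, not part of the original file): the sun4u value
 * only sets the privilege and user/system trace bits, while the Niagara-2
 * value appears to also enable trap-on-overflow for counter 1 and pre-load
 * that counter's event select and mask fields.
 */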

u64 pcr_enable;
unsigned int picl_shift;

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work
 * that expects IRQ disabling to be adhered to in locking etc.
 *
 * In such situations we defer the work by signalling
 * a lower level cpu IRQ.
 */
/* Handler for the lower-PIL soft interrupt; acknowledge it here so the
 * deferred work can run in a context where normal IRQ disabling rules
 * apply again.
 */
void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	clear_softint(1 << PIL_DEFERRED_PCR_WORK);
}

/* Called from PIL 15 context to trigger the deferred-work interrupt. */
void schedule_deferred_pcr_work(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
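
/* Illustrative sketch, not part of the original file: a PIL 15 overflow
 * handler that needs to do a wakeup or similar work would record its
 * state using only PIL-15-safe operations and then punt, e.g.:
 *
 *	schedule_deferred_pcr_work();
 *
 * The resulting soft interrupt is delivered at a lower PIL, where the
 * deferred work can safely run.
 */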

/* Per-cpu-type hooks for accessing the performance control register. */
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
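
/* Illustrative sketch, not part of the original file: exported users such
 * as the perf_counter layer or the NMI watchdog are expected to go through
 * these hooks rather than touching %pcr directly, roughly:
 *
 *	u64 pcr = pcr_ops->read();
 *	pcr_ops->write(pcr | pcr_enable);
 *
 * so the hypervisor-mediated write path is picked up transparently on
 * sun4v systems.
 */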

/* On sun4u the kernel can access %pcr directly. */
static u64 direct_pcr_read(void)
{
	u64 val;

	read_pcr(val);
	return val;
}

static void direct_pcr_write(u64 val)
{
	write_pcr(val);
}

static const struct pcr_ops direct_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= direct_pcr_write,
};

/* On Niagara-2 the PCR is programmed through the hypervisor; fall back
 * to a direct write if the service call does not succeed.
 */
static void n2_pcr_write(u64 val)
{
	unsigned long ret;

	ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
	if (ret != HV_EOK)
		write_pcr(val);
}

static const struct pcr_ops n2_pcr_ops = {
	.read	= direct_pcr_read,
	.write	= n2_pcr_write,
};
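
/* Note (added commentary, not part of the original file): even on
 * Niagara-2 the read hook goes straight to the register; only writes
 * are routed through the hypervisor service above.
 */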

static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;

static int __init register_perf_hsvc(void)
{
	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		if (sun4v_hvapi_register(perf_hsvc_group,
					 perf_hsvc_major,
					 &perf_hsvc_minor)) {
			printk("perfmon: Could not register hvapi.\n");
			return -ENODEV;
		}
	}
	return 0;
}
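
/* Note (added commentary, not part of the original file): the minor version
 * is passed to sun4v_hvapi_register() by reference because it is negotiated
 * with the hypervisor; on success perf_hsvc_minor is expected to hold the
 * minor version actually provided.
 */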

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}

int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		pcr_ops = &n2_pcr_ops;
		pcr_enable = PCR_N2_ENABLE;
		picl_shift = 2;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		pcr_enable = PCR_SUN4U_ENABLE;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		/* fallthrough */
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}

arch_initcall(pcr_arch_init);
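
/* Note (added commentary, not part of the original file): because this is an
 * arch_initcall, pcr_arch_init() runs early during boot, and it only calls
 * nmi_init() after pcr_ops and pcr_enable have been chosen, so the NMI
 * watchdog is never started against an unconfigured PCR layer.
 */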