xref: /openbmc/linux/drivers/gpu/drm/msm/msm_perf.c (revision feac8c8b)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/* For profiling, userspace can:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/perf
 *
 * This will enable performance counters/profiling to track the busy time
 * and any GPU-specific performance counters that are supported.
 */
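
/* The output is plain text: the first line and every 32nd line thereafter
 * is a header row ("%BUSY" followed by the name of each supported counter),
 * and every other line is one sample taken per SAMPLE_TIME interval, giving
 * the busy percentage followed by one scaled counter value per column
 * (see refill_buf()).
 */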

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>

#include "msm_drv.h"
#include "msm_gpu.h"

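/* Per-device state for the "perf" debugfs file: whether it is currently
 * open, how many lines have been emitted (used to periodically repeat the
 * header row), the formatted output not yet copied to userspace, and the
 * jiffy deadline for the next sample.
 */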
struct msm_perf_state {
	struct drm_device *dev;

	bool open;
	int cnt;
	struct mutex read_lock;

	char buf[256];
	int buftot, bufpos;

	unsigned long next_jiffies;
};

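/* sample period: HZ/4 jiffies, i.e. four samples per second */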
#define SAMPLE_TIME (HZ/4)

/* wait for next sample time: */
static int wait_sample(struct msm_perf_state *perf)
{
	unsigned long start_jiffies = jiffies;

	if (time_after(perf->next_jiffies, start_jiffies)) {
		unsigned long remaining_jiffies =
			perf->next_jiffies - start_jiffies;
		int ret = schedule_timeout_interruptible(remaining_jiffies);
		if (ret > 0) {
			/* interrupted */
			return -ERESTARTSYS;
		}
	}
	perf->next_jiffies += SAMPLE_TIME;
	return 0;
}

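/* Format the next line of output into perf->buf.  Every 32nd call emits
 * the header row ("%BUSY" plus each counter name); otherwise sleep until
 * the next sample time and emit one row with the busy percentage and the
 * sampled counter values.
 */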
static int refill_buf(struct msm_perf_state *perf)
{
	struct msm_drm_private *priv = perf->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	char *ptr = perf->buf;
	int rem = sizeof(perf->buf);
	int i, n;

	if ((perf->cnt++ % 32) == 0) {
		/* Header line: */
		n = snprintf(ptr, rem, "%%BUSY");
		ptr += n;
		rem -= n;

		for (i = 0; i < gpu->num_perfcntrs; i++) {
			const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
			n = snprintf(ptr, rem, "\t%s", perfcntr->name);
			ptr += n;
			rem -= n;
		}
	} else {
		/* Sample line: */
		uint32_t activetime = 0, totaltime = 0;
		uint32_t cntrs[5];
		uint32_t val;
		int ret;

		/* sleep until next sample time: */
		ret = wait_sample(perf);
		if (ret)
			return ret;

		ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
				ARRAY_SIZE(cntrs), cntrs);
		if (ret < 0)
			return ret;

		val = totaltime ? 1000 * activetime / totaltime : 0;
		n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
		ptr += n;
		rem -= n;

		for (i = 0; i < ret; i++) {
			/* cycle counters (I think).. convert to MHz.. */
			val = cntrs[i] / 10000;
			n = snprintf(ptr, rem, "\t%5d.%02d",
					val / 100, val % 100);
			ptr += n;
			rem -= n;
		}
	}

	n = snprintf(ptr, rem, "\n");
	ptr += n;
	rem -= n;

	perf->bufpos = 0;
	perf->buftot = ptr - perf->buf;

	return 0;
}

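/* Copy buffered output to userspace.  Once the buffer has been fully
 * consumed it is refilled, which may sleep until the next sample time.
 * At most one line is buffered, so short reads are expected.
 */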
static ssize_t perf_read(struct file *file, char __user *buf,
		size_t sz, loff_t *ppos)
{
	struct msm_perf_state *perf = file->private_data;
	int n = 0, ret = 0;

	mutex_lock(&perf->read_lock);

	if (perf->bufpos >= perf->buftot) {
		ret = refill_buf(perf);
		if (ret)
			goto out;
	}

	n = min((int)sz, perf->buftot - perf->bufpos);
	if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
		ret = -EFAULT;
		goto out;
	}

	perf->bufpos += n;
	*ppos += n;

out:
	mutex_unlock(&perf->read_lock);
	if (ret)
		return ret;
	return n;
}

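/* Only one opener is allowed at a time (and a GPU must be present);
 * opening resets the sample state and starts the GPU performance
 * counters.
 */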
static int perf_open(struct inode *inode, struct file *file)
{
	struct msm_perf_state *perf = inode->i_private;
	struct drm_device *dev = perf->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (perf->open || !gpu) {
		ret = -EBUSY;
		goto out;
	}

	file->private_data = perf;
	perf->open = true;
	perf->cnt = 0;
	perf->buftot = 0;
	perf->bufpos = 0;
	msm_gpu_perfcntr_start(gpu);
	perf->next_jiffies = jiffies + SAMPLE_TIME;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

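/* Counterpart of perf_open(): stop the GPU performance counters and let
 * the file be opened again.
 */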
static int perf_release(struct inode *inode, struct file *file)
{
	struct msm_perf_state *perf = inode->i_private;
	struct msm_drm_private *priv = perf->dev->dev_private;
	msm_gpu_perfcntr_stop(priv->gpu);
	perf->open = false;
	return 0;
}


static const struct file_operations perf_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = perf_open,
	.read = perf_read,
	.llseek = no_llseek,
	.release = perf_release,
};

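/* Create the "perf" debugfs file.  The state is shared device-wide, so it
 * is only created for the first minor.
 */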
int msm_perf_debugfs_init(struct drm_minor *minor)
{
	struct msm_drm_private *priv = minor->dev->dev_private;
	struct msm_perf_state *perf;
	struct dentry *ent;

	/* only create on first minor: */
	if (priv->perf)
		return 0;

	perf = kzalloc(sizeof(*perf), GFP_KERNEL);
	if (!perf)
		return -ENOMEM;

	perf->dev = minor->dev;

	mutex_init(&perf->read_lock);
	priv->perf = perf;

	ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
			minor->debugfs_root, perf, &perf_debugfs_fops);
	if (!ent) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/perf\n",
				minor->debugfs_root);
		goto fail;
	}

	return 0;

fail:
	msm_perf_debugfs_cleanup(priv);
	return -1;
}

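/* Tear down the shared state; safe to call even if init failed partway,
 * since it bails out when nothing was allocated.
 */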
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv)
{
	struct msm_perf_state *perf = priv->perf;

	if (!perf)
		return;

	priv->perf = NULL;

	mutex_destroy(&perf->read_lock);

	kfree(perf);
}

#endif