// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora Ltd */

#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/panfrost_drm.h>
#include <linux/completion.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_gem.h"
#include "panfrost_issues.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"

#define COUNTERS_PER_BLOCK		64
#define BYTES_PER_COUNTER		4
#define BLOCKS_PER_COREGROUP		8
#define V4_SHADERS_PER_COREGROUP	4

struct panfrost_perfcnt {
	struct panfrost_gem_object *bo;
	size_t bosize;
	void *buf;
	struct panfrost_file_priv *user;
	struct mutex lock;
	struct completion dump_comp;
};

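/*
 * A counter dump completes in two steps: GPU_CMD_PERFCNT_SAMPLE raises
 * PERFCNT_SAMPLE_COMPLETED once the counters have been sampled, at which
 * point we issue a cache clean so the data lands in memory, and the
 * CLEAN_CACHES_COMPLETED interrupt finally signals dump_comp. The two
 * helpers below are called from the GPU interrupt handler.
 */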
void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev)
{
	complete(&pfdev->perfcnt->dump_comp);
}

void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev)
{
	gpu_write(pfdev, GPU_CMD, GPU_CMD_CLEAN_CACHES);
}

static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
{
	u64 gpuva;
	int ret;

	reinit_completion(&pfdev->perfcnt->dump_comp);
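	/*
	 * node.start is expressed in GPU pages: shift it to get the GPU
	 * virtual address of the dump buffer.
	 */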
	gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
	gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
	gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
	gpu_write(pfdev, GPU_INT_CLEAR,
		  GPU_IRQ_CLEAN_CACHES_COMPLETED |
		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_PERFCNT_SAMPLE);
	ret = wait_for_completion_interruptible_timeout(&pfdev->perfcnt->dump_comp,
							msecs_to_jiffies(1000));
	if (!ret)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = 0;

	return ret;
}

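/*
 * Claim the counters for the calling file: take a runtime PM reference,
 * allocate the dump buffer and map it both into the file's GPU address space
 * and into the kernel, clear the counters, then program the per-block enable
 * masks and the configuration register. Called with perfcnt->lock held.
 */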
static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
					  struct drm_file *file_priv,
					  unsigned int counterset)
{
	struct panfrost_file_priv *user = file_priv->driver_priv;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	struct drm_gem_shmem_object *bo;
	u32 cfg;
	int ret;

	if (user == perfcnt->user)
		return 0;
	else if (perfcnt->user)
		return -EBUSY;

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(pfdev->dev);
		return ret;
	}

	bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto err_put_pm;
	}

	perfcnt->bo = to_panfrost_bo(&bo->base);

	/* Map the perfcnt buf in the address space attached to file_priv. */
	ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
	if (ret)
		goto err_put_bo;

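	/*
	 * Keep a kernel (CPU) mapping of the dump buffer around: the dump
	 * ioctl copies the sampled counter values from it to userspace.
	 */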
	perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
	if (IS_ERR(perfcnt->buf)) {
		ret = PTR_ERR(perfcnt->buf);
		goto err_close_bo;
	}

	/*
	 * Invalidate the cache and clear the counters to start from a fresh
	 * state.
	 */
	reinit_completion(&pfdev->perfcnt->dump_comp);
	gpu_write(pfdev, GPU_INT_CLEAR,
		  GPU_IRQ_CLEAN_CACHES_COMPLETED |
		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_PERFCNT_CLEAR);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_CLEAN_INV_CACHES);
	ret = wait_for_completion_timeout(&pfdev->perfcnt->dump_comp,
					  msecs_to_jiffies(1000));
	if (!ret) {
		ret = -ETIMEDOUT;
		goto err_vunmap;
	}

	perfcnt->user = user;

	/*
	 * Always use address space 0 for now.
	 * FIXME: this needs to be updated when we start using different
	 * address spaces.
	 */
	cfg = GPU_PERFCNT_CFG_AS(0) |
	      GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);

	/*
	 * Bifrost GPUs have two sets of counters, but we're only interested
	 * in the first one for now.
	 */
	if (panfrost_model_is_bifrost(pfdev))
		cfg |= GPU_PERFCNT_CFG_SETSEL(counterset);

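	/* Enable every counter in the Job Manager, shader and MMU/L2 blocks. */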
	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0xffffffff);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0xffffffff);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0xffffffff);

	/*
	 * Due to PRLAM-8186 we need to disable the Tiler before we enable HW
	 * counters.
	 */
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
	else
		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);

	gpu_write(pfdev, GPU_PERFCNT_CFG, cfg);

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);

	return 0;

err_vunmap:
	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
err_close_bo:
	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
err_put_bo:
	drm_gem_object_put_unlocked(&bo->base);
err_put_pm:
	/* Balance the runtime PM reference taken on entry. */
	pm_runtime_put(pfdev->dev);
	return ret;
}

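/*
 * Turn the counters off, release the CPU and GPU mappings of the dump buffer,
 * and drop the runtime PM reference taken at enable time. Only the file that
 * enabled the counters may disable them. Called with perfcnt->lock held.
 */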
static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
					   struct drm_file *file_priv)
{
	struct panfrost_file_priv *user = file_priv->driver_priv;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;

	if (user != perfcnt->user)
		return -EINVAL;

	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
	gpu_write(pfdev, GPU_PERFCNT_CFG,
		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));

	perfcnt->user = NULL;
	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
	perfcnt->buf = NULL;
	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
	drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
	perfcnt->bo = NULL;
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);

	return 0;
}

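/*
 * PERFCNT_ENABLE ioctl handler: claims the counters for the calling file (or
 * releases them when req->enable is 0). Only one file may own the counters at
 * a time; an enable attempt from another file fails with -EBUSY.
 */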
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	struct drm_panfrost_perfcnt_enable *req = data;
	int ret;

	ret = panfrost_unstable_ioctl_check();
	if (ret)
		return ret;

	/* Only Bifrost GPUs have two sets of counters. */
	if (req->counterset > (panfrost_model_is_bifrost(pfdev) ? 1 : 0))
		return -EINVAL;

	mutex_lock(&perfcnt->lock);
	if (req->enable)
		ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
						     req->counterset);
	else
		ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
	mutex_unlock(&perfcnt->lock);

	return ret;
}

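/*
 * PERFCNT_DUMP ioctl handler: triggers a manual counter sample into the dump
 * buffer and copies the result to the userspace buffer pointed to by
 * req->buf_ptr. Only the file that enabled the counters may dump them.
 */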
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	struct drm_panfrost_perfcnt_dump *req = data;
	void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr;
	int ret;

	ret = panfrost_unstable_ioctl_check();
	if (ret)
		return ret;

	mutex_lock(&perfcnt->lock);
	if (perfcnt->user != file_priv->driver_priv) {
		ret = -EINVAL;
		goto out;
	}

	ret = panfrost_perfcnt_dump_locked(pfdev);
	if (ret)
		goto out;

	if (copy_to_user(user_ptr, perfcnt->buf, perfcnt->bosize))
		ret = -EFAULT;

out:
	mutex_unlock(&perfcnt->lock);

	return ret;
}

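/*
 * Called when a DRM file is closed: if that file still owns the counters,
 * disable them so an exiting or crashed client doesn't keep them claimed.
 */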
void panfrost_perfcnt_close(struct drm_file *file_priv)
{
	struct panfrost_file_priv *pfile = file_priv->driver_priv;
	struct panfrost_device *pfdev = pfile->pfdev;
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;

	pm_runtime_get_sync(pfdev->dev);
	mutex_lock(&perfcnt->lock);
	if (perfcnt->user == pfile)
		panfrost_perfcnt_disable_locked(pfdev, file_priv);
	mutex_unlock(&perfcnt->lock);
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
}

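/*
 * Compute the dump buffer size from the GPU's counter layout (v4 GPUs use a
 * per-coregroup layout, later GPUs one block per L2 cache, shader core, JM
 * and Tiler), make sure all counters start disabled, and allocate the
 * perfcnt state.
 */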
int panfrost_perfcnt_init(struct panfrost_device *pfdev)
{
	struct panfrost_perfcnt *perfcnt;
	size_t size;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_V4)) {
		unsigned int ncoregroups;

		ncoregroups = hweight64(pfdev->features.l2_present);
		size = ncoregroups * BLOCKS_PER_COREGROUP *
		       COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
	} else {
		unsigned int nl2c, ncores;

		/*
		 * TODO: define a macro to extract the number of l2 caches from
		 * mem_features.
		 */
		nl2c = ((pfdev->features.mem_features >> 8) & GENMASK(3, 0)) + 1;

		/*
		 * shader_present might be sparse, but the counters layout
		 * forces us to dump unused regions too, hence the fls64()
		 * call instead of hweight64().
		 */
		ncores = fls64(pfdev->features.shader_present);

		/*
		 * There's always one JM and one Tiler block, hence the '+ 2'
		 * here.
		 */
		size = (nl2c + ncores + 2) *
		       COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
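
		/*
		 * Hypothetical example: with one L2 cache and
		 * shader_present = 0xf, nl2c = 1 and ncores = 4, so
		 * size = (1 + 4 + 2) * 64 * 4 = 1792 bytes.
		 */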
	}

	perfcnt = devm_kzalloc(pfdev->dev, sizeof(*perfcnt), GFP_KERNEL);
	if (!perfcnt)
		return -ENOMEM;

	perfcnt->bosize = size;

	/* Start with everything disabled. */
	gpu_write(pfdev, GPU_PERFCNT_CFG,
		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);

	init_completion(&perfcnt->dump_comp);
	mutex_init(&perfcnt->lock);
	pfdev->perfcnt = perfcnt;

	return 0;
}

void panfrost_perfcnt_fini(struct panfrost_device *pfdev)
{
	/* Disable everything before leaving. */
	gpu_write(pfdev, GPU_PERFCNT_CFG,
		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
	gpu_write(pfdev, GPU_PRFCNT_JM_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_SHADER_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_MMU_L2_EN, 0);
	gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0);
}