// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
 */

#include "adreno_gpu.h"

bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);

bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);

bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);

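/*
 * Table of known Adreno/Imageon GPU revisions.  adreno_info() matches the
 * revision probed from the device tree against these entries; ANY_ID fields
 * act as wildcards (see adreno_cmp_rev() below).
 */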
static const struct adreno_info gpulist[] = {
	{
		.rev   = ADRENO_REV(2, 0, 0, 0),
		.revn  = 200,
		.name  = "A200",
		.fw = {
			[ADRENO_FW_PM4] = "yamato_pm4.fw",
			[ADRENO_FW_PFP] = "yamato_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, { /* a200 on i.mx51 has only 128kib gmem */
		.rev   = ADRENO_REV(2, 0, 0, 1),
		.revn  = 201,
		.name  = "A200",
		.fw = {
			[ADRENO_FW_PM4] = "yamato_pm4.fw",
			[ADRENO_FW_PFP] = "yamato_pfp.fw",
		},
		.gmem  = SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(2, 2, 0, ANY_ID),
		.revn  = 220,
		.name  = "A220",
		.fw = {
			[ADRENO_FW_PM4] = "leia_pm4_470.fw",
			[ADRENO_FW_PFP] = "leia_pfp_470.fw",
		},
		.gmem  = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 0, 6, 0),
		.revn  = 307,        /* because a305c is revn==306 */
		.name  = "A306",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, ANY_ID),
		.revn  = 330,
		.name  = "A330",
		.fw = {
			[ADRENO_FW_PM4] = "a330_pm4.fw",
			[ADRENO_FW_PFP] = "a330_pfp.fw",
		},
		.gmem  = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 0, 5, ANY_ID),
		.revn  = 405,
		.name  = "A405",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 2, 0, ANY_ID),
		.revn  = 420,
		.name  = "A420",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = (SZ_1M + SZ_512K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 3, 0, ANY_ID),
		.revn  = 430,
		.name  = "A430",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = (SZ_1M + SZ_512K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(5, 0, 6, ANY_ID),
		.revn = 506,
		.name = "A506",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_128K + SZ_8K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
			  ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a506_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 0, 8, ANY_ID),
		.revn = 508,
		.name = "A508",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_128K + SZ_8K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a508_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 0, 9, ANY_ID),
		.revn = 509,
		.name = "A509",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_256K + SZ_16K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		/* Adreno 509 uses the same ZAP as 512 */
		.zapfw = "a512_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 1, 0, ANY_ID),
		.revn = 510,
		.name = "A510",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = SZ_256K,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.init = a5xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(5, 1, 2, ANY_ID),
		.revn = 512,
		.name = "A512",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_256K + SZ_16K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a512_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 3, 0, 2),
		.revn = 530,
		.name = "A530",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
			[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
		},
		.gmem = SZ_1M,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
			ADRENO_QUIRK_FAULT_DETECT_MASK,
		.init = a5xx_gpu_init,
		.zapfw = "a530_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 4, 0, ANY_ID),
		.revn = 540,
		.name = "A540",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
			[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
		},
		.gmem = SZ_1M,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a540_zap.mdt",
	}, {
		.rev = ADRENO_REV(6, 1, 8, ANY_ID),
		.revn = 618,
		.name = "A618",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a630_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
	}, {
		.rev = ADRENO_REV(6, 1, 9, ANY_ID),
		.revn = 619,
		.name = "A619",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a619_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a615_zap.mdt",
		.hwcg = a615_hwcg,
	}, {
		.rev = ADRENO_REV(6, 3, 0, ANY_ID),
		.revn = 630,
		.name = "A630",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a630_gmu.bin",
		},
		.gmem = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a630_zap.mdt",
		.hwcg = a630_hwcg,
	}, {
		.rev = ADRENO_REV(6, 4, 0, ANY_ID),
		.revn = 640,
		.name = "A640",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a640_gmu.bin",
		},
		.gmem = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a640_zap.mdt",
		.hwcg = a640_hwcg,
	}, {
		.rev = ADRENO_REV(6, 5, 0, ANY_ID),
		.revn = 650,
		.name = "A650",
		.fw = {
			[ADRENO_FW_SQE] = "a650_sqe.fw",
			[ADRENO_FW_GMU] = "a650_gmu.bin",
		},
		.gmem = SZ_1M + SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a650_zap.mdt",
		.hwcg = a650_hwcg,
		.address_space_size = SZ_16G,
	}, {
		.rev = ADRENO_REV(6, 6, 0, ANY_ID),
		.revn = 660,
		.name = "A660",
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a660_gmu.bin",
		},
		.gmem = SZ_1M + SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a660_zap.mdt",
		.hwcg = a660_hwcg,
		.address_space_size = SZ_16G,
	}, {
		.rev = ADRENO_REV(6, 3, 5, ANY_ID),
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a660_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.hwcg = a660_hwcg,
		.address_space_size = SZ_16G,
	}, {
		.rev = ADRENO_REV(6, 8, 0, ANY_ID),
		.revn = 680,
		.name = "A680",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a640_gmu.bin",
		},
		.gmem = SZ_2M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a640_zap.mdt",
		.hwcg = a640_hwcg,
	},
};

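/*
 * Advertise the firmware files referenced above so that modinfo and
 * initramfs tooling can discover them.
 */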
MODULE_FIRMWARE("qcom/a300_pm4.fw");
MODULE_FIRMWARE("qcom/a300_pfp.fw");
MODULE_FIRMWARE("qcom/a330_pm4.fw");
MODULE_FIRMWARE("qcom/a330_pfp.fw");
MODULE_FIRMWARE("qcom/a420_pm4.fw");
MODULE_FIRMWARE("qcom/a420_pfp.fw");
MODULE_FIRMWARE("qcom/a530_pm4.fw");
MODULE_FIRMWARE("qcom/a530_pfp.fw");
MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
MODULE_FIRMWARE("qcom/a630_zap.mbn");

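/* An ANY_ID field in a gpulist entry acts as a wildcard and matches any value. */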
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
	return (entry == ANY_ID) || (entry == id);
}

bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
{
	return _rev_match(rev1.core, rev2.core) &&
		_rev_match(rev1.major, rev2.major) &&
		_rev_match(rev1.minor, rev2.minor) &&
		_rev_match(rev1.patchid, rev2.patchid);
}

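/* Find the gpulist entry matching a given revision, or NULL if unknown. */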
const struct adreno_info *adreno_info(struct adreno_rev rev)
{
	int i;

	/* identify gpu: */
	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
		const struct adreno_info *info = &gpulist[i];
		if (adreno_cmp_rev(info->rev, rev))
			return info;
	}

	return NULL;
}

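/*
 * Bring the GPU up for use: ensure firmware is loaded, enable runtime PM
 * and run HW init under gpu->lock.  Returns NULL on any failure so callers
 * can fall back to running without GPU acceleration.
 */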
struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct msm_gpu *gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	int ret;

	if (pdev)
		gpu = dev_to_gpu(&pdev->dev);

	if (!gpu) {
		dev_err_once(dev->dev, "no GPU device was found\n");
		return NULL;
	}

	adreno_gpu = to_adreno_gpu(gpu);

	/*
	 * The number one reason for HW init to fail is if the firmware isn't
	 * loaded yet. Try that first and don't bother continuing on
	 * otherwise
	 */

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return NULL;

	/*
	 * Now that we have firmware loaded, and are ready to begin
	 * booting the gpu, go ahead and enable runpm:
	 */
	pm_runtime_enable(&pdev->dev);

	/* Make sure pm runtime is active and reset any previous errors */
	pm_runtime_set_active(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_sync(&pdev->dev);
		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
		return NULL;
	}

	mutex_lock(&gpu->lock);
	ret = msm_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	pm_runtime_put_autosuspend(&pdev->dev);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		return NULL;
	}

#ifdef CONFIG_DEBUG_FS
	if (gpu->funcs->debugfs_init) {
		gpu->funcs->debugfs_init(gpu, dev->primary);
		gpu->funcs->debugfs_init(gpu, dev->render);
	}
#endif

	return gpu;
}

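/*
 * Determine the GPU revision, preferably from a qcom,adreno-XYZ.W (or
 * amd,imageon-XYZ.W) compatible string, falling back to the legacy
 * qcom,chipid property.
 */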
static int find_chipid(struct device *dev, struct adreno_rev *rev)
{
	struct device_node *node = dev->of_node;
	const char *compat;
	int ret;
	u32 chipid;

	/* first search the compat strings for qcom,adreno-XYZ.W: */
	ret = of_property_read_string_index(node, "compatible", 0, &compat);
	if (ret == 0) {
		unsigned int r, patch;

		if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
		    sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
			rev->core = r / 100;
			r %= 100;
			rev->major = r / 10;
			r %= 10;
			rev->minor = r;
			rev->patchid = patch;

			return 0;
		}
	}

	/* and if that fails, fall back to legacy "qcom,chipid" property: */
	ret = of_property_read_u32(node, "qcom,chipid", &chipid);
	if (ret) {
		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
		return ret;
	}

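	/* Legacy chipid is packed as 0xCCMMmmPP: core, major, minor, patchid. */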
	rev->core = (chipid >> 24) & 0xff;
	rev->major = (chipid >> 16) & 0xff;
	rev->minor = (chipid >> 8) & 0xff;
	rev->patchid = (chipid & 0xff);

	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
	dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
		rev->core, rev->major, rev->minor, rev->patchid);

	return 0;
}

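/*
 * Component bind: determine the GPU revision from DT, look up the matching
 * gpulist entry and call its per-generation init function.
 */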
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
	static struct adreno_platform_config config = {};
	const struct adreno_info *info;
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct drm_device *drm = priv->dev;
	struct msm_gpu *gpu;
	int ret;

	ret = find_chipid(dev, &config.rev);
	if (ret)
		return ret;

	dev->platform_data = &config;
	priv->gpu_pdev = to_platform_device(dev);

	info = adreno_info(config.rev);

	if (!info) {
		dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
			config.rev.core, config.rev.major,
			config.rev.minor, config.rev.patchid);
		return -ENXIO;
	}

	DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
		config.rev.minor, config.rev.patchid);

	priv->is_a2xx = config.rev.core == 2;
	priv->has_cached_coherent = config.rev.core >= 6;

	gpu = info->init(drm);
	if (IS_ERR(gpu)) {
		dev_warn(drm->dev, "failed to load adreno gpu\n");
		return PTR_ERR(gpu);
	}

	return 0;
}

static void adreno_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct msm_gpu *gpu = dev_to_gpu(dev);

	pm_runtime_force_suspend(dev);
	gpu->funcs->destroy(gpu);

	priv->gpu_pdev = NULL;
}

static const struct component_ops a3xx_ops = {
		.bind   = adreno_bind,
		.unbind = adreno_unbind,
};

static void adreno_device_register_headless(void)
{
	/* on imx5, we don't have a top-level mdp/dpu node
	 * this creates a dummy node for the driver for that case
	 */
	struct platform_device_info dummy_info = {
		.parent = NULL,
		.name = "msm",
		.id = -1,
		.res = NULL,
		.num_res = 0,
		.data = NULL,
		.size_data = 0,
		.dma_mask = ~0,
	};
	platform_device_register_full(&dummy_info);
}

static int adreno_probe(struct platform_device *pdev)
{
	int ret;

	ret = component_add(&pdev->dev, &a3xx_ops);
	if (ret)
		return ret;

	if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
		adreno_device_register_headless();

	return 0;
}

static int adreno_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &a3xx_ops);
	return 0;
}

static void adreno_shutdown(struct platform_device *pdev)
{
	pm_runtime_force_suspend(&pdev->dev);
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	/* for compatibility with imx5 gpu: */
	{ .compatible = "amd,imageon" },
	/* for backwards compat w/ downstream kgsl DT files: */
	{ .compatible = "qcom,kgsl-3d0" },
	{}
};

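/*
 * Runtime PM callbacks defer to the per-generation pm_suspend()/pm_resume()
 * implementations.
 */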
static int adreno_runtime_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	return gpu->funcs->pm_resume(gpu);
}

static int adreno_runtime_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/*
	 * We should be holding a runpm ref, which will prevent
	 * runtime suspend.  In the system suspend path, we've
	 * already waited for active jobs to complete.
	 */
	WARN_ON_ONCE(gpu->active_submits);

	return gpu->funcs->pm_suspend(gpu);
}

static void suspend_scheduler(struct msm_gpu *gpu)
{
	int i;

	/*
	 * Shut down the scheduler before we force suspend, so that
	 * suspend isn't racing with scheduler kthread feeding us
	 * more work.
	 *
	 * Note, we just want to park the thread, and let any jobs
	 * that are already on the hw queue complete normally, as
	 * opposed to the drm_sched_stop() path used for handling
	 * faulting/timed-out jobs.  We can't really cancel any jobs
	 * already on the hw queue without racing with the GPU.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
		kthread_park(sched->thread);
	}
}

static void resume_scheduler(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
		kthread_unpark(sched->thread);
	}
}

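/*
 * System suspend: park the schedulers, wait (up to 1s) for in-flight
 * submits to retire, then force a runtime suspend.
 */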
static int adreno_system_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	int remaining, ret;

	suspend_scheduler(gpu);

	remaining = wait_event_timeout(gpu->retire_event,
				       gpu->active_submits == 0,
				       msecs_to_jiffies(1000));
	if (remaining == 0) {
		dev_err(dev, "Timeout waiting for GPU to suspend\n");
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_force_suspend(dev);
out:
	if (ret)
		resume_scheduler(gpu);

	return ret;
}

static int adreno_system_resume(struct device *dev)
{
	resume_scheduler(dev_to_gpu(dev));
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops adreno_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
	RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};

static struct platform_driver adreno_driver = {
	.probe = adreno_probe,
	.remove = adreno_remove,
	.shutdown = adreno_shutdown,
	.driver = {
		.name = "adreno",
		.of_match_table = dt_match,
		.pm = &adreno_pm_ops,
	},
};

void __init adreno_register(void)
{
	platform_driver_register(&adreno_driver);
}

void __exit adreno_unregister(void)
{
	platform_driver_unregister(&adreno_driver);
}