1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013-2014 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
7  */
8 
9 #include "adreno_gpu.h"
10 
/*
 * Module parameters.  These are deliberately non-static: presumably they
 * are declared extern (likely in adreno_gpu.h, included above) so other
 * parts of the driver can read them — TODO confirm against the header.
 */

/* Dump GPU registers into the log when a hang is detected. */
bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);

/* Include (potentially large) debugbus sections in GPU devcoredumps. */
bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);

/* Permit the VRAM carveout fallback when no IOMMU is available. */
bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
22 
/*
 * Catalog of supported Adreno GPUs.  adreno_info() below returns the
 * first entry whose .rev matches the probed revision (ANY_ID fields act
 * as wildcards), so more-specific revisions must precede wildcard ones.
 */
static const struct adreno_info gpulist[] = {
	{
		/* ---- a2xx family ---- */
		.rev   = ADRENO_REV(2, 0, 0, 0),
		.revn  = 200,
		.name  = "A200",
		.fw = {
			[ADRENO_FW_PM4] = "yamato_pm4.fw",
			[ADRENO_FW_PFP] = "yamato_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, { /* a200 on i.mx51 has only 128kib gmem */
		.rev   = ADRENO_REV(2, 0, 0, 1),
		.revn  = 201,
		.name  = "A200",
		.fw = {
			[ADRENO_FW_PM4] = "yamato_pm4.fw",
			[ADRENO_FW_PFP] = "yamato_pfp.fw",
		},
		.gmem  = SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(2, 2, 0, ANY_ID),
		.revn  = 220,
		.name  = "A220",
		.fw = {
			[ADRENO_FW_PM4] = "leia_pm4_470.fw",
			[ADRENO_FW_PFP] = "leia_pfp_470.fw",
		},
		.gmem  = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, {
		/* ---- a3xx family ---- */
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 0, 6, 0),
		.revn  = 307,        /* because a305c is revn==306 */
		.name  = "A306",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, ANY_ID),
		.revn  = 330,
		.name  = "A330",
		.fw = {
			[ADRENO_FW_PM4] = "a330_pm4.fw",
			[ADRENO_FW_PFP] = "a330_pfp.fw",
		},
		.gmem  = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		/* ---- a4xx family ---- */
		.rev   = ADRENO_REV(4, 0, 5, ANY_ID),
		.revn  = 405,
		.name  = "A405",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 2, 0, ANY_ID),
		.revn  = 420,
		.name  = "A420",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = (SZ_1M + SZ_512K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 3, 0, ANY_ID),
		.revn  = 430,
		.name  = "A430",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = (SZ_1M + SZ_512K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		/* ---- a5xx family ---- */
		.rev   = ADRENO_REV(5, 0, 6, ANY_ID),
		.revn = 506,
		.name = "A506",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_128K + SZ_8K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
			  ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a506_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 0, 8, ANY_ID),
		.revn = 508,
		.name = "A508",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_128K + SZ_8K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a508_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 0, 9, ANY_ID),
		.revn = 509,
		.name = "A509",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_256K + SZ_16K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		/* Adreno 509 uses the same ZAP as 512 */
		.zapfw = "a512_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 1, 0, ANY_ID),
		.revn = 510,
		.name = "A510",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = SZ_256K,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.init = a5xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(5, 1, 2, ANY_ID),
		.revn = 512,
		.name = "A512",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_256K + SZ_16K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a512_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 3, 0, 2),
		.revn = 530,
		.name = "A530",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
			[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
		},
		.gmem = SZ_1M,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
			ADRENO_QUIRK_FAULT_DETECT_MASK,
		.init = a5xx_gpu_init,
		.zapfw = "a530_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 4, 0, ANY_ID),
		.revn = 540,
		.name = "A540",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
			[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
		},
		.gmem = SZ_1M,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a540_zap.mdt",
	}, {
		/* ---- a6xx family ---- */
		.rev = ADRENO_REV(6, 1, 0, ANY_ID),
		.revn = 610,
		.name = "A610",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
		},
		.gmem = (SZ_128K + SZ_4K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a610_zap.mdt",
		/* NOTE(review): A610 reuses the a612 hwcg table — confirm intended. */
		.hwcg = a612_hwcg,
	}, {
		.rev = ADRENO_REV(6, 1, 8, ANY_ID),
		.revn = 618,
		.name = "A618",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a630_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
	}, {
		.rev = ADRENO_REV(6, 1, 9, ANY_ID),
		.revn = 619,
		.name = "A619",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a619_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a615_zap.mdt",
		.hwcg = a615_hwcg,
	}, {
		.rev = ADRENO_REV(6, 3, 0, ANY_ID),
		.revn = 630,
		.name = "A630",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a630_gmu.bin",
		},
		.gmem = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a630_zap.mdt",
		.hwcg = a630_hwcg,
	}, {
		.rev = ADRENO_REV(6, 4, 0, ANY_ID),
		.revn = 640,
		.name = "A640",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a640_gmu.bin",
		},
		.gmem = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a640_zap.mdt",
		.hwcg = a640_hwcg,
	}, {
		.rev = ADRENO_REV(6, 5, 0, ANY_ID),
		.revn = 650,
		.name = "A650",
		.fw = {
			[ADRENO_FW_SQE] = "a650_sqe.fw",
			[ADRENO_FW_GMU] = "a650_gmu.bin",
		},
		.gmem = SZ_1M + SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a650_zap.mdt",
		.hwcg = a650_hwcg,
		.address_space_size = SZ_16G,
	}, {
		.rev = ADRENO_REV(6, 6, 0, ANY_ID),
		.revn = 660,
		.name = "A660",
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a660_gmu.bin",
		},
		.gmem = SZ_1M + SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a660_zap.mdt",
		.hwcg = a660_hwcg,
		.address_space_size = SZ_16G,
	}, {
		/* NOTE(review): anonymous entry (no revn/name) for rev 6.3.5 —
		 * confirm whether a name was intentionally omitted. */
		.rev = ADRENO_REV(6, 3, 5, ANY_ID),
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a660_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.hwcg = a660_hwcg,
		.address_space_size = SZ_16G,
	}, {
		.rev = ADRENO_REV(6, 8, 0, ANY_ID),
		.revn = 680,
		.name = "A680",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a640_gmu.bin",
		},
		.gmem = SZ_2M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a640_zap.mdt",
		.hwcg = a640_hwcg,
	}, {
		.rev = ADRENO_REV(6, 9, 0, ANY_ID),
		.revn = 690,
		.name = "A690",
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a690_gmu.bin",
		},
		.gmem = SZ_4M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a690_zap.mdt",
		.hwcg = a690_hwcg,
		.address_space_size = SZ_16G,
	},
};
386 
/*
 * Advertise firmware images so userspace tooling (e.g. initramfs
 * generators) can bundle them.
 * NOTE(review): this list does not cover every file named in gpulist
 * (e.g. a6xx sqe/gmu/zap files beyond a630), and it lists
 * "qcom/a630_zap.mbn" while gpulist references "a630_zap.mdt" —
 * confirm whether the list is intentionally partial.
 */
MODULE_FIRMWARE("qcom/a300_pm4.fw");
MODULE_FIRMWARE("qcom/a300_pfp.fw");
MODULE_FIRMWARE("qcom/a330_pm4.fw");
MODULE_FIRMWARE("qcom/a330_pfp.fw");
MODULE_FIRMWARE("qcom/a420_pm4.fw");
MODULE_FIRMWARE("qcom/a420_pfp.fw");
MODULE_FIRMWARE("qcom/a530_pm4.fw");
MODULE_FIRMWARE("qcom/a530_pfp.fw");
MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
MODULE_FIRMWARE("qcom/a630_zap.mbn");
404 
405 static inline bool _rev_match(uint8_t entry, uint8_t id)
406 {
407 	return (entry == ANY_ID) || (entry == id);
408 }
409 
410 bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
411 {
412 
413 	return _rev_match(rev1.core, rev2.core) &&
414 		_rev_match(rev1.major, rev2.major) &&
415 		_rev_match(rev1.minor, rev2.minor) &&
416 		_rev_match(rev1.patchid, rev2.patchid);
417 }
418 
419 const struct adreno_info *adreno_info(struct adreno_rev rev)
420 {
421 	int i;
422 
423 	/* identify gpu: */
424 	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
425 		const struct adreno_info *info = &gpulist[i];
426 		if (adreno_cmp_rev(info->rev, rev))
427 			return info;
428 	}
429 
430 	return NULL;
431 }
432 
/*
 * adreno_load_gpu() - fetch firmware and bring the GPU up for first use.
 *
 * Returns the msm_gpu on success, or NULL when no GPU device is bound,
 * firmware cannot be loaded, or HW init fails.  On success the device is
 * left with runtime PM enabled and an autosuspend reference dropped.
 */
struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct msm_gpu *gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	int ret;

	if (pdev)
		gpu = dev_to_gpu(&pdev->dev);

	if (!gpu) {
		dev_err_once(dev->dev, "no GPU device was found\n");
		return NULL;
	}

	adreno_gpu = to_adreno_gpu(gpu);

	/*
	 * The number one reason for HW init to fail is if the firmware isn't
	 * loaded yet. Try that first and don't bother continuing on
	 * otherwise
	 */

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return NULL;

	/* Optional backend hook to stage the ucode for the GPU. */
	if (gpu->funcs->ucode_load) {
		ret = gpu->funcs->ucode_load(gpu);
		if (ret)
			return NULL;
	}

	/*
	 * Now that we have firmware loaded, and are ready to begin
	 * booting the gpu, go ahead and enable runpm:
	 */
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync raises the usage count even on failure; drop it. */
		pm_runtime_put_noidle(&pdev->dev);
		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
		goto err_disable_rpm;
	}

	/* HW init is serialized against other users of the GPU. */
	mutex_lock(&gpu->lock);
	ret = msm_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		goto err_put_rpm;
	}

	pm_runtime_put_autosuspend(&pdev->dev);

#ifdef CONFIG_DEBUG_FS
	if (gpu->funcs->debugfs_init) {
		gpu->funcs->debugfs_init(gpu, dev->primary);
		gpu->funcs->debugfs_init(gpu, dev->render);
	}
#endif

	return gpu;

err_put_rpm:
	pm_runtime_put_sync_suspend(&pdev->dev);
err_disable_rpm:
	pm_runtime_disable(&pdev->dev);

	return NULL;
}
506 
507 static int find_chipid(struct device *dev, struct adreno_rev *rev)
508 {
509 	struct device_node *node = dev->of_node;
510 	const char *compat;
511 	int ret;
512 	u32 chipid;
513 
514 	/* first search the compat strings for qcom,adreno-XYZ.W: */
515 	ret = of_property_read_string_index(node, "compatible", 0, &compat);
516 	if (ret == 0) {
517 		unsigned int r, patch;
518 
519 		if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
520 		    sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
521 			rev->core = r / 100;
522 			r %= 100;
523 			rev->major = r / 10;
524 			r %= 10;
525 			rev->minor = r;
526 			rev->patchid = patch;
527 
528 			return 0;
529 		}
530 	}
531 
532 	/* and if that fails, fall back to legacy "qcom,chipid" property: */
533 	ret = of_property_read_u32(node, "qcom,chipid", &chipid);
534 	if (ret) {
535 		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
536 		return ret;
537 	}
538 
539 	rev->core = (chipid >> 24) & 0xff;
540 	rev->major = (chipid >> 16) & 0xff;
541 	rev->minor = (chipid >> 8) & 0xff;
542 	rev->patchid = (chipid & 0xff);
543 
544 	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
545 	dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
546 		rev->core, rev->major, rev->minor, rev->patchid);
547 
548 	return 0;
549 }
550 
551 static int adreno_bind(struct device *dev, struct device *master, void *data)
552 {
553 	static struct adreno_platform_config config = {};
554 	const struct adreno_info *info;
555 	struct msm_drm_private *priv = dev_get_drvdata(master);
556 	struct drm_device *drm = priv->dev;
557 	struct msm_gpu *gpu;
558 	int ret;
559 
560 	ret = find_chipid(dev, &config.rev);
561 	if (ret)
562 		return ret;
563 
564 	dev->platform_data = &config;
565 	priv->gpu_pdev = to_platform_device(dev);
566 
567 	info = adreno_info(config.rev);
568 
569 	if (!info) {
570 		dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
571 			config.rev.core, config.rev.major,
572 			config.rev.minor, config.rev.patchid);
573 		return -ENXIO;
574 	}
575 
576 	DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
577 		config.rev.minor, config.rev.patchid);
578 
579 	priv->is_a2xx = config.rev.core == 2;
580 
581 	gpu = info->init(drm);
582 	if (IS_ERR(gpu)) {
583 		dev_warn(drm->dev, "failed to load adreno gpu\n");
584 		return PTR_ERR(gpu);
585 	}
586 
587 	ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
588 	if (ret)
589 		return ret;
590 
591 	if (config.rev.core >= 6)
592 		if (!adreno_has_gmu_wrapper(to_adreno_gpu(gpu)))
593 			priv->has_cached_coherent = true;
594 
595 	return 0;
596 }
597 
/* Forward decl: unbind reuses the system-sleep path to quiesce the GPU. */
static int adreno_system_suspend(struct device *dev);
/*
 * adreno_unbind() - component unbind: power down and destroy the GPU.
 */
static void adreno_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/* Drain outstanding work and suspend before tearing the GPU down. */
	if (pm_runtime_enabled(dev))
		WARN_ON_ONCE(adreno_system_suspend(dev));
	gpu->funcs->destroy(gpu);

	priv->gpu_pdev = NULL;
}
611 
/* Component ops used for every GPU generation (the a3xx_ prefix is only a name). */
static const struct component_ops a3xx_ops = {
	.bind   = adreno_bind,
	.unbind = adreno_unbind,
};
616 
617 static void adreno_device_register_headless(void)
618 {
619 	/* on imx5, we don't have a top-level mdp/dpu node
620 	 * this creates a dummy node for the driver for that case
621 	 */
622 	struct platform_device_info dummy_info = {
623 		.parent = NULL,
624 		.name = "msm",
625 		.id = -1,
626 		.res = NULL,
627 		.num_res = 0,
628 		.data = NULL,
629 		.size_data = 0,
630 		.dma_mask = ~0,
631 	};
632 	platform_device_register_full(&dummy_info);
633 }
634 
635 static int adreno_probe(struct platform_device *pdev)
636 {
637 
638 	int ret;
639 
640 	ret = component_add(&pdev->dev, &a3xx_ops);
641 	if (ret)
642 		return ret;
643 
644 	if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
645 		adreno_device_register_headless();
646 
647 	return 0;
648 }
649 
/* Platform-driver remove: detach from the component framework. */
static int adreno_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &a3xx_ops);
	return 0;
}
655 
/* Quiesce and suspend the GPU before the system powers off / reboots. */
static void adreno_shutdown(struct platform_device *pdev)
{
	WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
}
660 
/* Devicetree compatibles this driver binds against. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	/* for compatibility with imx5 gpu: */
	{ .compatible = "amd,imageon" },
	/* for backwards compat w/ downstream kgsl DT files: */
	{ .compatible = "qcom,kgsl-3d0" },
	{} /* sentinel */
};
670 
671 static int adreno_runtime_resume(struct device *dev)
672 {
673 	struct msm_gpu *gpu = dev_to_gpu(dev);
674 
675 	return gpu->funcs->pm_resume(gpu);
676 }
677 
/* Runtime suspend: power the GPU down via the generation-specific hook. */
static int adreno_runtime_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/*
	 * We should be holding a runpm ref, which will prevent
	 * runtime suspend.  In the system suspend path, we've
	 * already waited for active jobs to complete.
	 */
	WARN_ON_ONCE(gpu->active_submits);

	return gpu->funcs->pm_suspend(gpu);
}
691 
692 static void suspend_scheduler(struct msm_gpu *gpu)
693 {
694 	int i;
695 
696 	/*
697 	 * Shut down the scheduler before we force suspend, so that
698 	 * suspend isn't racing with scheduler kthread feeding us
699 	 * more work.
700 	 *
701 	 * Note, we just want to park the thread, and let any jobs
702 	 * that are already on the hw queue complete normally, as
703 	 * opposed to the drm_sched_stop() path used for handling
704 	 * faulting/timed-out jobs.  We can't really cancel any jobs
705 	 * already on the hw queue without racing with the GPU.
706 	 */
707 	for (i = 0; i < gpu->nr_rings; i++) {
708 		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
709 		kthread_park(sched->thread);
710 	}
711 }
712 
713 static void resume_scheduler(struct msm_gpu *gpu)
714 {
715 	int i;
716 
717 	for (i = 0; i < gpu->nr_rings; i++) {
718 		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
719 		kthread_unpark(sched->thread);
720 	}
721 }
722 
723 static int adreno_system_suspend(struct device *dev)
724 {
725 	struct msm_gpu *gpu = dev_to_gpu(dev);
726 	int remaining, ret;
727 
728 	if (!gpu)
729 		return 0;
730 
731 	suspend_scheduler(gpu);
732 
733 	remaining = wait_event_timeout(gpu->retire_event,
734 				       gpu->active_submits == 0,
735 				       msecs_to_jiffies(1000));
736 	if (remaining == 0) {
737 		dev_err(dev, "Timeout waiting for GPU to suspend\n");
738 		ret = -EBUSY;
739 		goto out;
740 	}
741 
742 	ret = pm_runtime_force_suspend(dev);
743 out:
744 	if (ret)
745 		resume_scheduler(gpu);
746 
747 	return ret;
748 }
749 
/* System-sleep resume: restart the schedulers, then runtime-resume. */
static int adreno_system_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/* Headless / unbound device: nothing to restart. */
	if (!gpu)
		return 0;

	resume_scheduler(gpu);

	return pm_runtime_force_resume(dev);
}
760 
/* System sleep drains submits first; runtime PM has no extra idle hook. */
static const struct dev_pm_ops adreno_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
	RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};
765 
/* Platform driver for the GPU device node itself. */
static struct platform_driver adreno_driver = {
	.probe = adreno_probe,
	.remove = adreno_remove,
	.shutdown = adreno_shutdown,
	.driver = {
		.name = "adreno",
		.of_match_table = dt_match,
		.pm = &adreno_pm_ops,
	},
};
776 
/* Register the adreno platform driver (presumably called from the msm
 * core's module init — confirm against the caller). */
void __init adreno_register(void)
{
	platform_driver_register(&adreno_driver);
}
781 
/* Unregister the adreno platform driver (module exit counterpart). */
void __exit adreno_unregister(void)
{
	platform_driver_unregister(&adreno_driver);
}
786