1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013-2014 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
7  */
8 
9 #include "adreno_gpu.h"
10 
/* Dump GPU registers to the log when a hang is detected (can be very slow). */
bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);

/* Include debugbus sections in the GPU devcoredump, when not fused off. */
bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);

/* Allow falling back to a VRAM carveout in place of a working IOMMU. */
bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
22 
/*
 * Catalog of supported GPUs, matched against the revision parsed from
 * devicetree (see adreno_info()).  ANY_ID fields act as wildcards and
 * the first matching entry wins, so more specific entries are listed
 * before broader ones within a family.
 */
static const struct adreno_info gpulist[] = {
	{
		.rev   = ADRENO_REV(2, 0, 0, 0),
		.revn  = 200,
		.name  = "A200",
		.fw = {
			[ADRENO_FW_PM4] = "yamato_pm4.fw",
			[ADRENO_FW_PFP] = "yamato_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, { /* a200 on i.mx51 has only 128kib gmem */
		.rev   = ADRENO_REV(2, 0, 0, 1),
		.revn  = 201,
		.name  = "A200",
		.fw = {
			[ADRENO_FW_PM4] = "yamato_pm4.fw",
			[ADRENO_FW_PFP] = "yamato_pfp.fw",
		},
		.gmem  = SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(2, 2, 0, ANY_ID),
		.revn  = 220,
		.name  = "A220",
		.fw = {
			[ADRENO_FW_PM4] = "leia_pm4_470.fw",
			[ADRENO_FW_PFP] = "leia_pfp_470.fw",
		},
		.gmem  = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a2xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 0, 6, 0),
		.revn  = 307,        /* because a305c is revn==306 */
		.name  = "A306",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.fw = {
			[ADRENO_FW_PM4] = "a300_pm4.fw",
			[ADRENO_FW_PFP] = "a300_pfp.fw",
		},
		.gmem  = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, ANY_ID),
		.revn  = 330,
		.name  = "A330",
		.fw = {
			[ADRENO_FW_PM4] = "a330_pm4.fw",
			[ADRENO_FW_PFP] = "a330_pfp.fw",
		},
		.gmem  = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a3xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 0, 5, ANY_ID),
		.revn  = 405,
		.name  = "A405",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = SZ_256K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 2, 0, ANY_ID),
		.revn  = 420,
		.name  = "A420",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = (SZ_1M + SZ_512K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(4, 3, 0, ANY_ID),
		.revn  = 430,
		.name  = "A430",
		.fw = {
			[ADRENO_FW_PM4] = "a420_pm4.fw",
			[ADRENO_FW_PFP] = "a420_pfp.fw",
		},
		.gmem  = (SZ_1M + SZ_512K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init  = a4xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(5, 0, 6, ANY_ID),
		.revn = 506,
		.name = "A506",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_128K + SZ_8K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
			  ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a506_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 0, 8, ANY_ID),
		.revn = 508,
		.name = "A508",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_128K + SZ_8K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a508_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 0, 9, ANY_ID),
		.revn = 509,
		.name = "A509",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_256K + SZ_16K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		/* Adreno 509 uses the same ZAP as 512 */
		.zapfw = "a512_zap.mdt",
	}, {
		.rev   = ADRENO_REV(5, 1, 0, ANY_ID),
		.revn = 510,
		.name = "A510",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = SZ_256K,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.init = a5xx_gpu_init,
	}, {
		.rev   = ADRENO_REV(5, 1, 2, ANY_ID),
		.revn = 512,
		.name = "A512",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
		},
		.gmem = (SZ_256K + SZ_16K),
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a512_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 3, 0, 2),
		.revn = 530,
		.name = "A530",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
			[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
		},
		.gmem = SZ_1M,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
			ADRENO_QUIRK_FAULT_DETECT_MASK,
		.init = a5xx_gpu_init,
		.zapfw = "a530_zap.mdt",
	}, {
		.rev = ADRENO_REV(5, 4, 0, ANY_ID),
		.revn = 540,
		.name = "A540",
		.fw = {
			[ADRENO_FW_PM4] = "a530_pm4.fw",
			[ADRENO_FW_PFP] = "a530_pfp.fw",
			[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
		},
		.gmem = SZ_1M,
		/*
		 * Increase inactive period to 250 to avoid bouncing
		 * the GDSC which appears to make it grumpy
		 */
		.inactive_period = 250,
		.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
		.init = a5xx_gpu_init,
		.zapfw = "a540_zap.mdt",
	}, {
		.rev = ADRENO_REV(6, 1, 0, ANY_ID),
		.revn = 610,
		.name = "A610",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
		},
		.gmem = (SZ_128K + SZ_4K),
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a610_zap.mdt",
		.hwcg = a612_hwcg,
	}, {
		.rev = ADRENO_REV(6, 1, 8, ANY_ID),
		.revn = 618,
		.name = "A618",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a630_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
	}, {
		.rev = ADRENO_REV(6, 1, 9, ANY_ID),
		.revn = 619,
		.name = "A619",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a619_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a615_zap.mdt",
		.hwcg = a615_hwcg,
	}, {
		.rev = ADRENO_REV(6, 3, 0, ANY_ID),
		.revn = 630,
		.name = "A630",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a630_gmu.bin",
		},
		.gmem = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a630_zap.mdt",
		.hwcg = a630_hwcg,
	}, {
		.rev = ADRENO_REV(6, 4, 0, ANY_ID),
		.revn = 640,
		.name = "A640",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a640_gmu.bin",
		},
		.gmem = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a640_zap.mdt",
		.hwcg = a640_hwcg,
	}, {
		.rev = ADRENO_REV(6, 5, 0, ANY_ID),
		.revn = 650,
		.name = "A650",
		.fw = {
			[ADRENO_FW_SQE] = "a650_sqe.fw",
			[ADRENO_FW_GMU] = "a650_gmu.bin",
		},
		.gmem = SZ_1M + SZ_128K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a650_zap.mdt",
		.hwcg = a650_hwcg,
		.address_space_size = SZ_16G,
	}, {
		.rev = ADRENO_REV(6, 6, 0, ANY_ID),
		.revn = 660,
		.name = "A660",
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a660_gmu.bin",
		},
		.gmem = SZ_1M + SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a660_zap.mdt",
		.hwcg = a660_hwcg,
		.address_space_size = SZ_16G,
	}, {
		/* no .revn/.name for this entry in the original table */
		.rev = ADRENO_REV(6, 3, 5, ANY_ID),
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a660_gmu.bin",
		},
		.gmem = SZ_512K,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.hwcg = a660_hwcg,
		.address_space_size = SZ_16G,
	}, {
		.rev = ADRENO_REV(6, 8, 0, ANY_ID),
		.revn = 680,
		.name = "A680",
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a640_gmu.bin",
		},
		.gmem = SZ_2M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a640_zap.mdt",
		.hwcg = a640_hwcg,
	}, {
		.rev = ADRENO_REV(6, 9, 0, ANY_ID),
		.fw = {
			[ADRENO_FW_SQE] = "a660_sqe.fw",
			[ADRENO_FW_GMU] = "a690_gmu.bin",
		},
		.gmem = SZ_4M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.zapfw = "a690_zap.mdt",
		.hwcg = a690_hwcg,
		.address_space_size = SZ_16G,
	},
};
384 
/*
 * Advertise firmware files so userspace tooling (e.g. initramfs builders)
 * can include them alongside the module.
 */
MODULE_FIRMWARE("qcom/a300_pm4.fw");
MODULE_FIRMWARE("qcom/a300_pfp.fw");
MODULE_FIRMWARE("qcom/a330_pm4.fw");
MODULE_FIRMWARE("qcom/a330_pfp.fw");
MODULE_FIRMWARE("qcom/a420_pm4.fw");
MODULE_FIRMWARE("qcom/a420_pfp.fw");
MODULE_FIRMWARE("qcom/a530_pm4.fw");
MODULE_FIRMWARE("qcom/a530_pfp.fw");
MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
MODULE_FIRMWARE("qcom/a630_zap.mbn");
402 
403 static inline bool _rev_match(uint8_t entry, uint8_t id)
404 {
405 	return (entry == ANY_ID) || (entry == id);
406 }
407 
408 bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
409 {
410 
411 	return _rev_match(rev1.core, rev2.core) &&
412 		_rev_match(rev1.major, rev2.major) &&
413 		_rev_match(rev1.minor, rev2.minor) &&
414 		_rev_match(rev1.patchid, rev2.patchid);
415 }
416 
417 const struct adreno_info *adreno_info(struct adreno_rev rev)
418 {
419 	int i;
420 
421 	/* identify gpu: */
422 	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
423 		const struct adreno_info *info = &gpulist[i];
424 		if (adreno_cmp_rev(info->rev, rev))
425 			return info;
426 	}
427 
428 	return NULL;
429 }
430 
/*
 * adreno_load_gpu() - fetch the already-bound GPU and bring it up
 * @dev: top-level drm device
 *
 * Loads firmware, enables runtime PM and performs hw init.  Returns the
 * msm_gpu on success, or NULL if no GPU device was bound, firmware is
 * unavailable, or hw init failed.  On the later failure paths runtime PM
 * is rolled back so a subsequent attempt starts from a clean state.
 */
struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct msm_gpu *gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	int ret;

	/* gpu_pdev is only set once the component bind succeeded */
	if (pdev)
		gpu = dev_to_gpu(&pdev->dev);

	if (!gpu) {
		dev_err_once(dev->dev, "no GPU device was found\n");
		return NULL;
	}

	adreno_gpu = to_adreno_gpu(gpu);

	/*
	 * The number one reason for HW init to fail is if the firmware isn't
	 * loaded yet. Try that first and don't bother continuing on
	 * otherwise
	 */

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return NULL;

	/* optional per-generation hook to stage the loaded ucode */
	if (gpu->funcs->ucode_load) {
		ret = gpu->funcs->ucode_load(gpu);
		if (ret)
			return NULL;
	}

	/*
	 * Now that we have firmware loaded, and are ready to begin
	 * booting the gpu, go ahead and enable runpm:
	 */
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync raised the usage count even on failure; drop it */
		pm_runtime_put_noidle(&pdev->dev);
		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
		goto err_disable_rpm;
	}

	/* msm_gpu_hw_init() is called with gpu->lock held */
	mutex_lock(&gpu->lock);
	ret = msm_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		goto err_put_rpm;
	}

	pm_runtime_put_autosuspend(&pdev->dev);

#ifdef CONFIG_DEBUG_FS
	if (gpu->funcs->debugfs_init) {
		gpu->funcs->debugfs_init(gpu, dev->primary);
		gpu->funcs->debugfs_init(gpu, dev->render);
	}
#endif

	return gpu;

err_put_rpm:
	pm_runtime_put_sync_suspend(&pdev->dev);
err_disable_rpm:
	pm_runtime_disable(&pdev->dev);

	return NULL;
}
504 
/*
 * find_chipid() - determine the GPU revision from devicetree
 * @dev: the GPU platform device
 * @rev: filled in with core.major.minor.patchid on success
 *
 * Prefers the "qcom,adreno-XYZ.W" (or "amd,imageon-XYZ.W") compatible
 * string; falls back to the legacy "qcom,chipid" u32 property.
 * Returns 0 on success or a negative errno if neither parses.
 */
static int find_chipid(struct device *dev, struct adreno_rev *rev)
{
	struct device_node *node = dev->of_node;
	const char *compat;
	int ret;
	u32 chipid;

	/* first search the compat strings for qcom,adreno-XYZ.W: */
	ret = of_property_read_string_index(node, "compatible", 0, &compat);
	if (ret == 0) {
		unsigned int r, patch;

		if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
		    sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
			/* split decimal XYZ into core=X, major=Y, minor=Z */
			rev->core = r / 100;
			r %= 100;
			rev->major = r / 10;
			r %= 10;
			rev->minor = r;
			rev->patchid = patch;

			return 0;
		}
	}

	/* and if that fails, fall back to legacy "qcom,chipid" property: */
	ret = of_property_read_u32(node, "qcom,chipid", &chipid);
	if (ret) {
		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
		return ret;
	}

	/* chipid packs one revision field per byte, core in the MSB */
	rev->core = (chipid >> 24) & 0xff;
	rev->major = (chipid >> 16) & 0xff;
	rev->minor = (chipid >> 8) & 0xff;
	rev->patchid = (chipid & 0xff);

	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
	dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
		rev->core, rev->major, rev->minor, rev->patchid);

	return 0;
}
548 
/*
 * adreno_bind() - component bind callback for the GPU device
 *
 * Identifies the GPU revision, looks it up in gpulist, and instantiates
 * the generation-specific GPU object via info->init().
 */
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
	/* static: handed out via dev->platform_data, must outlive this call */
	static struct adreno_platform_config config = {};
	const struct adreno_info *info;
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct drm_device *drm = priv->dev;
	struct msm_gpu *gpu;
	int ret;

	ret = find_chipid(dev, &config.rev);
	if (ret)
		return ret;

	dev->platform_data = &config;
	priv->gpu_pdev = to_platform_device(dev);

	info = adreno_info(config.rev);

	if (!info) {
		dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
			config.rev.core, config.rev.major,
			config.rev.minor, config.rev.patchid);
		return -ENXIO;
	}

	DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
		config.rev.minor, config.rev.patchid);

	priv->is_a2xx = config.rev.core == 2;

	gpu = info->init(drm);
	if (IS_ERR(gpu)) {
		dev_warn(drm->dev, "failed to load adreno gpu\n");
		return PTR_ERR(gpu);
	}

	ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
	if (ret)
		/*
		 * NOTE(review): the gpu object created above is left alive on
		 * this error path — confirm adreno_unbind() tears it down.
		 */
		return ret;

	/*
	 * NOTE(review): presumably a6xx+ with a real GMU supports cached
	 * coherent buffers — confirm against adreno_has_gmu_wrapper().
	 */
	if (config.rev.core >= 6)
		if (!adreno_has_gmu_wrapper(to_adreno_gpu(gpu)))
			priv->has_cached_coherent = true;

	return 0;
}
595 
596 static int adreno_system_suspend(struct device *dev);
/*
 * adreno_unbind() - component unbind callback; suspend then destroy the GPU.
 */
static void adreno_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct msm_drm_private *priv = dev_get_drvdata(master);
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/* quiesce the hw first, but only if runpm was ever enabled */
	if (pm_runtime_enabled(dev))
		WARN_ON_ONCE(adreno_system_suspend(dev));
	gpu->funcs->destroy(gpu);

	priv->gpu_pdev = NULL;
}
609 
/* Component ops used for all adreno generations (name is historical). */
static const struct component_ops a3xx_ops = {
	.bind   = adreno_bind,
	.unbind = adreno_unbind,
};
614 
615 static void adreno_device_register_headless(void)
616 {
617 	/* on imx5, we don't have a top-level mdp/dpu node
618 	 * this creates a dummy node for the driver for that case
619 	 */
620 	struct platform_device_info dummy_info = {
621 		.parent = NULL,
622 		.name = "msm",
623 		.id = -1,
624 		.res = NULL,
625 		.num_res = 0,
626 		.data = NULL,
627 		.size_data = 0,
628 		.dma_mask = ~0,
629 	};
630 	platform_device_register_full(&dummy_info);
631 }
632 
633 static int adreno_probe(struct platform_device *pdev)
634 {
635 
636 	int ret;
637 
638 	ret = component_add(&pdev->dev, &a3xx_ops);
639 	if (ret)
640 		return ret;
641 
642 	if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
643 		adreno_device_register_headless();
644 
645 	return 0;
646 }
647 
/* Platform remove: component teardown counterpart of adreno_probe(). */
static int adreno_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &a3xx_ops);
	return 0;
}
653 
/* Force-suspend the GPU on system shutdown/reboot. */
static void adreno_shutdown(struct platform_device *pdev)
{
	WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
}
658 
/* Devicetree compatibles handled by this driver. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	/* for compatibility with imx5 gpu: */
	{ .compatible = "amd,imageon" },
	/* for backwards compat w/ downstream kgsl DT files: */
	{ .compatible = "qcom,kgsl-3d0" },
	{}
};
668 
/* Runtime-PM resume: delegate to the per-generation resume hook. */
static int adreno_runtime_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	return gpu->funcs->pm_resume(gpu);
}
675 
/* Runtime-PM suspend: delegate to the per-generation suspend hook. */
static int adreno_runtime_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/*
	 * We should be holding a runpm ref, which will prevent
	 * runtime suspend.  In the system suspend path, we've
	 * already waited for active jobs to complete.
	 */
	WARN_ON_ONCE(gpu->active_submits);

	return gpu->funcs->pm_suspend(gpu);
}
689 
690 static void suspend_scheduler(struct msm_gpu *gpu)
691 {
692 	int i;
693 
694 	/*
695 	 * Shut down the scheduler before we force suspend, so that
696 	 * suspend isn't racing with scheduler kthread feeding us
697 	 * more work.
698 	 *
699 	 * Note, we just want to park the thread, and let any jobs
700 	 * that are already on the hw queue complete normally, as
701 	 * opposed to the drm_sched_stop() path used for handling
702 	 * faulting/timed-out jobs.  We can't really cancel any jobs
703 	 * already on the hw queue without racing with the GPU.
704 	 */
705 	for (i = 0; i < gpu->nr_rings; i++) {
706 		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
707 		kthread_park(sched->thread);
708 	}
709 }
710 
711 static void resume_scheduler(struct msm_gpu *gpu)
712 {
713 	int i;
714 
715 	for (i = 0; i < gpu->nr_rings; i++) {
716 		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
717 		kthread_unpark(sched->thread);
718 	}
719 }
720 
721 static int adreno_system_suspend(struct device *dev)
722 {
723 	struct msm_gpu *gpu = dev_to_gpu(dev);
724 	int remaining, ret;
725 
726 	if (!gpu)
727 		return 0;
728 
729 	suspend_scheduler(gpu);
730 
731 	remaining = wait_event_timeout(gpu->retire_event,
732 				       gpu->active_submits == 0,
733 				       msecs_to_jiffies(1000));
734 	if (remaining == 0) {
735 		dev_err(dev, "Timeout waiting for GPU to suspend\n");
736 		ret = -EBUSY;
737 		goto out;
738 	}
739 
740 	ret = pm_runtime_force_suspend(dev);
741 out:
742 	if (ret)
743 		resume_scheduler(gpu);
744 
745 	return ret;
746 }
747 
/* System-sleep resume: restart the schedulers, then force runtime resume. */
static int adreno_system_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/* the GPU component may not be bound */
	if (!gpu)
		return 0;

	resume_scheduler(gpu);
	return pm_runtime_force_resume(dev);
}
758 
/* PM callbacks: system sleep plus runtime PM (no idle callback). */
static const struct dev_pm_ops adreno_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
	RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};
763 
/* Platform driver glue for the "adreno" devicetree node. */
static struct platform_driver adreno_driver = {
	.probe = adreno_probe,
	.remove = adreno_remove,
	.shutdown = adreno_shutdown,
	.driver = {
		.name = "adreno",
		.of_match_table = dt_match,
		.pm = &adreno_pm_ops,
	},
};
774 
/* Called from the msm driver's module init to register the GPU driver. */
void __init adreno_register(void)
{
	platform_driver_register(&adreno_driver);
}
779 
/* Called from the msm driver's module exit to unregister the GPU driver. */
void __exit adreno_unregister(void)
{
	platform_driver_unregister(&adreno_driver);
}
784