1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
3 
4 #include <linux/clk.h>
5 #include <linux/interconnect.h>
6 #include <linux/pm_domain.h>
7 #include <linux/pm_opp.h>
8 #include <soc/qcom/cmd-db.h>
9 #include <drm/drm_gem.h>
10 
11 #include "a6xx_gpu.h"
12 #include "a6xx_gmu.xml.h"
13 #include "msm_gem.h"
14 #include "msm_gpu_trace.h"
15 #include "msm_mmu.h"
16 
17 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
18 {
19 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
20 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
21 	struct msm_gpu *gpu = &adreno_gpu->base;
22 
23 	/* FIXME: add a banner here */
24 	gmu->hung = true;
25 
26 	/* Turn off the hangcheck timer while we are resetting */
27 	del_timer(&gpu->hangcheck_timer);
28 
29 	/* Queue the GPU handler because we need to treat this as a recovery */
30 	kthread_queue_work(gpu->worker, &gpu->recover_work);
31 }
32 
33 static irqreturn_t a6xx_gmu_irq(int irq, void *data)
34 {
35 	struct a6xx_gmu *gmu = data;
36 	u32 status;
37 
38 	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
39 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
40 
41 	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
42 		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
43 
44 		a6xx_gmu_fault(gmu);
45 	}
46 
47 	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
48 		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
49 
50 	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
51 		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
52 			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
53 
54 	return IRQ_HANDLED;
55 }
56 
57 static irqreturn_t a6xx_hfi_irq(int irq, void *data)
58 {
59 	struct a6xx_gmu *gmu = data;
60 	u32 status;
61 
62 	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
63 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
64 
65 	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
66 		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
67 
68 		a6xx_gmu_fault(gmu);
69 	}
70 
71 	return IRQ_HANDLED;
72 }
73 
74 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
75 {
76 	u32 val;
77 
78 	/* This can be called from gpu state code so make sure GMU is valid */
79 	if (!gmu->initialized)
80 		return false;
81 
82 	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
83 
84 	return !(val &
85 		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
86 		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
87 }
88 
89 /* Check to see if the GX rail is still powered */
90 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
91 {
92 	u32 val;
93 
94 	/* This can be called from gpu state code so make sure GMU is valid */
95 	if (!gmu->initialized)
96 		return false;
97 
98 	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
99 
100 	return !(val &
101 		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
102 		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
103 }
104 
105 void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
106 		       bool suspended)
107 {
108 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
109 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
110 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
111 	u32 perf_index;
112 	unsigned long gpu_freq;
113 	int ret = 0;
114 
115 	gpu_freq = dev_pm_opp_get_freq(opp);
116 
117 	if (gpu_freq == gmu->freq)
118 		return;
119 
120 	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
121 		if (gpu_freq == gmu->gpu_freqs[perf_index])
122 			break;
123 
124 	gmu->current_perf_index = perf_index;
125 	gmu->freq = gmu->gpu_freqs[perf_index];
126 
127 	trace_msm_gmu_freq_change(gmu->freq, perf_index);
128 
129 	/*
130 	 * This can get called from devfreq while the hardware is idle. Don't
131 	 * bring up the power if it isn't already active. All we're doing here
132 	 * is updating the frequency so that when we come back online we're at
133 	 * the right rate.
134 	 */
135 	if (suspended)
136 		return;
137 
138 	if (!gmu->legacy) {
139 		a6xx_hfi_set_freq(gmu, perf_index);
140 		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
141 		return;
142 	}
143 
144 	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
145 
146 	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
147 			((3 & 0xf) << 28) | perf_index);
148 
149 	/*
150 	 * Send an invalid index as a vote for the bus bandwidth and let the
151 	 * firmware decide on the right vote
152 	 */
153 	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
154 
155 	/* Set and clear the OOB for DCVS to trigger the GMU */
156 	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
157 	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
158 
159 	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
160 	if (ret)
161 		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
162 
163 	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
164 }
165 
166 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
167 {
168 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
169 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
170 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
171 
172 	return gmu->freq;
173 }
174 
175 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
176 {
177 	u32 val;
178 	int local = gmu->idle_level;
179 
180 	/* SPTP and IFPC both report as IFPC */
181 	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
182 		local = GMU_IDLE_STATE_IFPC;
183 
184 	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
185 
186 	if (val == local) {
187 		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
188 			!a6xx_gmu_gx_is_on(gmu))
189 			return true;
190 	}
191 
192 	return false;
193 }
194 
195 /* Wait for the GMU to get to its most idle state */
196 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
197 {
198 	return spin_until(a6xx_gmu_check_idle_level(gmu));
199 }
200 
201 static int a6xx_gmu_start(struct a6xx_gmu *gmu)
202 {
203 	int ret;
204 	u32 val;
205 	u32 mask, reset_val;
206 
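	/* The word at the end of DTCM is taken to be the GMU firmware version:
	 * older builds (<= 0x20010004) signal init completion with the magic
	 * 0xbabeface in FW_INIT_RESULT, newer ones are only checked against
	 * the low 9 bits (expecting 0x100).
	 */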
207 	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
208 	if (val <= 0x20010004) {
209 		mask = 0xffffffff;
210 		reset_val = 0xbabeface;
211 	} else {
212 		mask = 0x1ff;
213 		reset_val = 0x100;
214 	}
215 
216 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
217 
218 	/* Set the log wptr index
219 	 * note: downstream saves the value in poweroff and restores it here
220 	 */
221 	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
222 
223 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
224 
225 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
226 		(val & mask) == reset_val, 100, 10000);
227 
228 	if (ret)
229 		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
230 
231 	return ret;
232 }
233 
234 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
235 {
236 	u32 val;
237 	int ret;
238 
239 	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
240 
241 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
242 		val & 1, 100, 10000);
243 	if (ret)
244 		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
245 
246 	return ret;
247 }
248 
249 struct a6xx_gmu_oob_bits {
250 	int set, ack, set_new, ack_new, clear, clear_new;
251 	const char *name;
252 };
253 
254 /* These are the interrupt / ack bits for each OOB request that are set
255  * in a6xx_gmu_set_oob() and a6xx_gmu_clear_oob()
256  */
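/* For example, on the legacy (HFI v1) path a GPU_SET request writes bit 16 to
 * HOST2GMU_INTR_SET, waits for ack bit 24 in GMU2HOST_INTR_INFO, and is later
 * dropped by writing the .clear bit (also 24) back to HOST2GMU_INTR_SET in
 * a6xx_gmu_clear_oob().
 */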
257 static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
258 	[GMU_OOB_GPU_SET] = {
259 		.name = "GPU_SET",
260 		.set = 16,
261 		.ack = 24,
262 		.set_new = 30,
263 		.ack_new = 31,
264 		.clear = 24,
265 		.clear_new = 31,
266 	},
267 
268 	[GMU_OOB_PERFCOUNTER_SET] = {
269 		.name = "PERFCOUNTER",
270 		.set = 17,
271 		.ack = 25,
272 		.set_new = 28,
273 		.ack_new = 30,
274 		.clear = 25,
275 		.clear_new = 29,
276 	},
277 
278 	[GMU_OOB_BOOT_SLUMBER] = {
279 		.name = "BOOT_SLUMBER",
280 		.set = 22,
281 		.ack = 30,
282 		.clear = 30,
283 	},
284 
285 	[GMU_OOB_DCVS_SET] = {
286 		.name = "GPU_DCVS",
287 		.set = 23,
288 		.ack = 31,
289 		.clear = 31,
290 	},
291 };
292 
293 /* Trigger an OOB (out of band) request to the GMU */
294 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
295 {
296 	int ret;
297 	u32 val;
298 	int request, ack;
299 
300 	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
301 
302 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
303 		return -EINVAL;
304 
305 	if (gmu->legacy) {
306 		request = a6xx_gmu_oob_bits[state].set;
307 		ack = a6xx_gmu_oob_bits[state].ack;
308 	} else {
309 		request = a6xx_gmu_oob_bits[state].set_new;
310 		ack = a6xx_gmu_oob_bits[state].ack_new;
311 		if (!request || !ack) {
312 			DRM_DEV_ERROR(gmu->dev,
313 				      "Invalid non-legacy GMU request %s\n",
314 				      a6xx_gmu_oob_bits[state].name);
315 			return -EINVAL;
316 		}
317 	}
318 
319 	/* Trigger the requested OOB operation */
320 	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
321 
322 	/* Wait for the acknowledge interrupt */
323 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
324 		val & (1 << ack), 100, 10000);
325 
326 	if (ret)
327 		DRM_DEV_ERROR(gmu->dev,
328 			"Timeout waiting for GMU OOB set %s: 0x%x\n",
329 				a6xx_gmu_oob_bits[state].name,
330 				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
331 
332 	/* Clear the acknowledge interrupt */
333 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
334 
335 	return ret;
336 }
337 
338 /* Clear a pending OOB state in the GMU */
339 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
340 {
341 	int bit;
342 
343 	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
344 
345 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
346 		return;
347 
348 	if (gmu->legacy)
349 		bit = a6xx_gmu_oob_bits[state].clear;
350 	else
351 		bit = a6xx_gmu_oob_bits[state].clear_new;
352 
353 	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
354 }
355 
356 /* Enable CPU control of SPTP power collapse */
357 int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
358 {
359 	int ret;
360 	u32 val;
361 
362 	if (!gmu->legacy)
363 		return 0;
364 
365 	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
366 
367 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
368 		(val & 0x38) == 0x28, 1, 100);
369 
370 	if (ret) {
371 		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
372 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
373 	}
374 
375 	return 0;
376 }
377 
378 /* Disable CPU control of SPTP power collapse */
379 void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
380 {
381 	u32 val;
382 	int ret;
383 
384 	if (!gmu->legacy)
385 		return;
386 
387 	/* Make sure retention is on */
388 	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
389 
390 	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
391 
392 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
393 		(val & 0x04), 100, 10000);
394 
395 	if (ret)
396 		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
397 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
398 }
399 
400 /* Let the GMU know we are starting a boot sequence */
401 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
402 {
403 	u32 vote;
404 
405 	/* Let the GMU know we are getting ready for boot */
406 	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
407 
408 	/* Choose the "default" power level as the highest available */
409 	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
410 
411 	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
412 	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
413 
414 	/* Let the GMU know the boot sequence has started */
415 	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
416 }
417 
418 /* Let the GMU know that we are about to go into slumber */
419 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
420 {
421 	int ret;
422 
423 	/* Disable the power counter so the GMU isn't busy */
424 	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
425 
426 	/* Disable SPTP_PC if the CPU is responsible for it */
427 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
428 		a6xx_sptprac_disable(gmu);
429 
430 	if (!gmu->legacy) {
431 		ret = a6xx_hfi_send_prep_slumber(gmu);
432 		goto out;
433 	}
434 
435 	/* Tell the GMU to get ready to slumber */
436 	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
437 
438 	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
439 	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
440 
441 	if (!ret) {
442 		/* Check to see if the GMU really did slumber */
443 		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
444 			!= 0x0f) {
445 			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
446 			ret = -ETIMEDOUT;
447 		}
448 	}
449 
450 out:
451 	/* Put fence into allow mode */
452 	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
453 	return ret;
454 }
455 
456 static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
457 {
458 	int ret;
459 	u32 val;
460 
461 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
462 	/* Wait for the register to finish posting */
463 	wmb();
464 
465 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
466 		val & (1 << 1), 100, 10000);
467 	if (ret) {
468 		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
469 		return ret;
470 	}
471 
472 	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
473 		!val, 100, 10000);
474 
475 	if (ret) {
476 		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
477 		return ret;
478 	}
479 
480 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
481 
482 	return 0;
483 }
484 
485 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
486 {
487 	int ret;
488 	u32 val;
489 
490 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
491 
492 	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
493 		val, val & (1 << 16), 100, 10000);
494 	if (ret)
495 		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
496 
497 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
498 }
499 
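/* PDC/SEQ register offsets passed in here are dword indices; pdc_write()
 * shifts them into byte offsets before writing.
 */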
500 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
501 {
502 	msm_writel(value, ptr + (offset << 2));
503 }
504 
505 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
506 		const char *name);
507 
508 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
509 {
510 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
511 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
512 	struct platform_device *pdev = to_platform_device(gmu->dev);
513 	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
514 	void __iomem *seqptr = NULL;
515 	uint32_t pdc_address_offset;
516 	bool pdc_in_aop = false;
517 
518 	if (IS_ERR(pdcptr))
519 		goto err;
520 
521 	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
522 		pdc_in_aop = true;
523 	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
524 		pdc_address_offset = 0x30090;
525 	else if (adreno_is_a619(adreno_gpu))
526 		pdc_address_offset = 0x300a0;
527 	else
528 		pdc_address_offset = 0x30080;
529 
530 	if (!pdc_in_aop) {
531 		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
532 		if (IS_ERR(seqptr))
533 			goto err;
534 	}
535 
536 	/* Disable SDE clock gating */
537 	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
538 
539 	/* Setup RSC PDC handshake for sleep and wakeup */
540 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
541 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
542 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
543 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
544 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
545 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
546 	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
547 	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
548 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
549 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
550 	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
551 
552 	/* Load RSC sequencer uCode for sleep and wakeup */
553 	if (adreno_is_a650_family(adreno_gpu)) {
554 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
555 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
556 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
557 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
558 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
559 	} else {
560 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
561 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
562 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
563 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
564 		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
565 	}
566 
567 	if (pdc_in_aop)
568 		goto setup_pdc;
569 
570 	/* Load PDC sequencer uCode for power up and power down sequence */
571 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
572 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
573 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
574 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
575 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
576 
577 	/* Set TCS commands used by PDC sequence for low power modes */
578 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
579 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
580 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
581 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
582 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
583 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
584 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
585 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
586 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
587 
588 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
589 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
590 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
591 
592 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
593 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
594 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
595 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
596 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
597 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
598 
599 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
600 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
601 	if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
602 			adreno_is_a650_family(adreno_gpu))
603 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
604 	else
605 		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
606 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
607 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
608 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
609 
610 	/* Setup GPU PDC */
611 setup_pdc:
612 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
613 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
614 
615 	/* ensure no writes happen before the uCode is fully written */
616 	wmb();
617 
618 	a6xx_rpmh_stop(gmu);
619 
620 err:
621 	if (!IS_ERR_OR_NULL(pdcptr))
622 		iounmap(pdcptr);
623 	if (!IS_ERR_OR_NULL(seqptr))
624 		iounmap(seqptr);
625 }
626 
627 /*
628  * The lowest 16 bits of this value are the number of XO clock cycles for main
629  * hysteresis which is set at 0x1680 cycles (300 us).  The higher 16 bits are
630  * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
631  */
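/* Sanity check, assuming the usual 19.2 MHz XO: 0x1680 = 5760 cycles, or
 * 5760 / 19.2 MHz = 300 us; 0xa = 10 cycles, or roughly 0.52 us.
 */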
632 
633 #define GMU_PWR_COL_HYST 0x000a1680
634 
635 /* Set up the idle state for the GMU */
636 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
637 {
638 	/* Disable GMU WB/RB buffer */
639 	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
640 	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
641 	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
642 
643 	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
644 
645 	switch (gmu->idle_level) {
646 	case GMU_IDLE_STATE_IFPC:
647 		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
648 			GMU_PWR_COL_HYST);
649 		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
650 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
651 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
652 		fallthrough;
653 	case GMU_IDLE_STATE_SPTP:
654 		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
655 			GMU_PWR_COL_HYST);
656 		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
657 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
658 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
659 	}
660 
661 	/* Enable RPMh GPU client */
662 	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
663 		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
664 		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
665 		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
666 		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
667 		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
668 		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
669 }
670 
671 struct block_header {
672 	u32 addr;
673 	u32 size;
674 	u32 type;
675 	u32 value;
676 	u32 data[];
677 };
678 
679 /* this should be a general kernel helper */
680 static int in_range(u32 addr, u32 start, u32 size)
681 {
682 	return addr >= start && addr < start + size;
683 }
684 
685 static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
686 {
687 	if (!in_range(blk->addr, bo->iova, bo->size))
688 		return false;
689 
690 	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
691 	return true;
692 }
693 
694 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
695 {
696 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
697 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
698 	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
699 	const struct block_header *blk;
700 	u32 reg_offset;
701 
702 	u32 itcm_base = 0x00000000;
703 	u32 dtcm_base = 0x00040000;
704 
705 	if (adreno_is_a650_family(adreno_gpu))
706 		dtcm_base = 0x10004000;
707 
708 	if (gmu->legacy) {
709 		/* Sanity check the size of the firmware that was loaded */
710 		if (fw_image->size > 0x8000) {
711 			DRM_DEV_ERROR(gmu->dev,
712 				"GMU firmware is bigger than the available region\n");
713 			return -EINVAL;
714 		}
715 
716 		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
717 			       (u32*) fw_image->data, fw_image->size);
718 		return 0;
719 	}
720 
721 
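	/* The non-legacy firmware image is a sequence of blocks, each a
	 * block_header followed by 'size' bytes of payload. ITCM/DTCM blocks
	 * are written through the register interface; anything else must land
	 * in one of the preallocated icache/dcache/dummy BOs.
	 */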
722 	for (blk = (const struct block_header *) fw_image->data;
723 	     (const u8*) blk < fw_image->data + fw_image->size;
724 	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
725 		if (blk->size == 0)
726 			continue;
727 
728 		if (in_range(blk->addr, itcm_base, SZ_16K)) {
729 			reg_offset = (blk->addr - itcm_base) >> 2;
730 			gmu_write_bulk(gmu,
731 				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
732 				blk->data, blk->size);
733 		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
734 			reg_offset = (blk->addr - dtcm_base) >> 2;
735 			gmu_write_bulk(gmu,
736 				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
737 				blk->data, blk->size);
738 		} else if (!fw_block_mem(&gmu->icache, blk) &&
739 			   !fw_block_mem(&gmu->dcache, blk) &&
740 			   !fw_block_mem(&gmu->dummy, blk)) {
741 			DRM_DEV_ERROR(gmu->dev,
742 				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
743 				blk->addr, blk->size, blk->data[0]);
744 		}
745 	}
746 
747 	return 0;
748 }
749 
750 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
751 {
752 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
753 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
754 	int ret;
755 	u32 chipid;
756 
757 	if (adreno_is_a650_family(adreno_gpu)) {
758 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
759 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
760 	}
761 
762 	if (state == GMU_WARM_BOOT) {
763 		ret = a6xx_rpmh_start(gmu);
764 		if (ret)
765 			return ret;
766 	} else {
767 		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
768 			"GMU firmware is not loaded\n"))
769 			return -ENOENT;
770 
771 		/* Turn on register retention */
772 		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
773 
774 		ret = a6xx_rpmh_start(gmu);
775 		if (ret)
776 			return ret;
777 
778 		ret = a6xx_gmu_fw_load(gmu);
779 		if (ret)
780 			return ret;
781 	}
782 
783 	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
784 	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
785 
786 	/* Write the iova of the HFI table */
787 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
788 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
789 
790 	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
791 		(1 << 31) | (0xa << 18) | (0xa0));
792 
793 	/*
794 	 * Snapshots toggle the NMI bit which will result in a jump to the NMI
795 	 * handler instead of __main. Set the M3 config value to avoid that.
796 	 */
797 	gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
798 
799 	/*
800 	 * Note that the GMU has a slightly different layout for
801 	 * chip_id, for whatever reason, so a bit of massaging
802 	 * is needed.  The upper 16b are the same, but minor and
803 	 * patchid are packed in four bits each with the lower
804 	 * 8b unused:
805 	 */
806 	chipid  = adreno_gpu->chip_id & 0xffff0000;
807 	chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
808 	chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
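	/* e.g. assuming the usual core/major/minor/patchid byte packing, a
	 * CPU-side chip_id of 0x06030001 becomes 0x06030100 for the GMU.
	 */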
809 
810 	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
811 
812 	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
813 		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));
814 
815 	/* Set up the lowest idle level on the GMU */
816 	a6xx_gmu_power_config(gmu);
817 
818 	ret = a6xx_gmu_start(gmu);
819 	if (ret)
820 		return ret;
821 
822 	if (gmu->legacy) {
823 		ret = a6xx_gmu_gfx_rail_on(gmu);
824 		if (ret)
825 			return ret;
826 	}
827 
828 	/* Enable SPTP_PC if the CPU is responsible for it */
829 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
830 		ret = a6xx_sptprac_enable(gmu);
831 		if (ret)
832 			return ret;
833 	}
834 
835 	ret = a6xx_gmu_hfi_start(gmu);
836 	if (ret)
837 		return ret;
838 
839 	/* FIXME: Do we need this wmb() here? */
840 	wmb();
841 
842 	return 0;
843 }
844 
845 #define A6XX_HFI_IRQ_MASK \
846 	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
847 
848 #define A6XX_GMU_IRQ_MASK \
849 	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
850 	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
851 	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
852 
853 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
854 {
855 	disable_irq(gmu->gmu_irq);
856 	disable_irq(gmu->hfi_irq);
857 
858 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
859 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
860 }
861 
862 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
863 {
864 	u32 val;
865 
866 	/* Make sure there are no outstanding RPMh votes */
867 	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
868 		(val & 1), 100, 10000);
869 	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
870 		(val & 1), 100, 10000);
871 	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
872 		(val & 1), 100, 10000);
873 	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
874 		(val & 1), 100, 1000);
875 }
876 
877 /* Force the GMU off in case it isn't responsive */
878 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
879 {
880 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
881 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
882 	struct msm_gpu *gpu = &adreno_gpu->base;
883 
884 	/*
885 	 * Turn off keep alive that might have been enabled by the hang
886 	 * interrupt
887 	 */
888 	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
889 
890 	/* Flush all the queues */
891 	a6xx_hfi_stop(gmu);
892 
893 	/* Stop the interrupts */
894 	a6xx_gmu_irq_disable(gmu);
895 
896 	/* Force off SPTP in case the GMU is managing it */
897 	a6xx_sptprac_disable(gmu);
898 
899 	/* Make sure there are no outstanding RPMh votes */
900 	a6xx_gmu_rpmh_off(gmu);
901 
902 	/* Clear the WRITEDROPPED fields and put fence into allow mode */
903 	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
904 	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
905 
906 	/* Make sure the above writes go through */
907 	wmb();
908 
909 	/* Halt the gmu cm3 core */
910 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
911 
912 	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
913 
914 	/* Reset GPU core blocks */
915 	a6xx_gpu_sw_reset(gpu, true);
916 }
917 
918 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
919 {
920 	struct dev_pm_opp *gpu_opp;
921 	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
922 
923 	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
924 	if (IS_ERR(gpu_opp))
925 		return;
926 
927 	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
928 	a6xx_gmu_set_freq(gpu, gpu_opp, false);
929 	dev_pm_opp_put(gpu_opp);
930 }
931 
932 static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
933 {
934 	struct dev_pm_opp *gpu_opp;
935 	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
936 
937 	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
938 	if (IS_ERR(gpu_opp))
939 		return;
940 
941 	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
942 	dev_pm_opp_put(gpu_opp);
943 }
944 
945 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
946 {
947 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
948 	struct msm_gpu *gpu = &adreno_gpu->base;
949 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
950 	int status, ret;
951 
952 	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
953 		return -EINVAL;
954 
955 	gmu->hung = false;
956 
957 	/* Turn on the resources */
958 	pm_runtime_get_sync(gmu->dev);
959 
960 	/*
961 	 * "enable" the GX power domain which won't actually do anything but it
962 	 * will make sure that the refcounting is correct in case we need to
963 	 * bring down the GX after a GMU failure
964 	 */
965 	if (!IS_ERR_OR_NULL(gmu->gxpd))
966 		pm_runtime_get_sync(gmu->gxpd);
967 
968 	/* Use a known rate to bring up the GMU */
969 	clk_set_rate(gmu->core_clk, 200000000);
970 	clk_set_rate(gmu->hub_clk, 150000000);
971 	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
972 	if (ret) {
973 		pm_runtime_put(gmu->gxpd);
974 		pm_runtime_put(gmu->dev);
975 		return ret;
976 	}
977 
978 	/* Set the bus quota to a reasonable value for boot */
979 	a6xx_gmu_set_initial_bw(gpu, gmu);
980 
981 	/* Enable the GMU interrupt */
982 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
983 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
984 	enable_irq(gmu->gmu_irq);
985 
986 	/* Check to see if we are doing a cold or warm boot */
987 	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
988 		GMU_WARM_BOOT : GMU_COLD_BOOT;
989 
990 	/*
991 	 * Warm boot path does not work on newer GPUs
992 	 * Presumably this is because icache/dcache regions must be restored
993 	 */
994 	if (!gmu->legacy)
995 		status = GMU_COLD_BOOT;
996 
997 	ret = a6xx_gmu_fw_start(gmu, status);
998 	if (ret)
999 		goto out;
1000 
1001 	ret = a6xx_hfi_start(gmu, status);
1002 	if (ret)
1003 		goto out;
1004 
1005 	/*
1006 	 * Turn on the GMU firmware fault interrupt after we know the boot
1007 	 * sequence is successful
1008 	 */
1009 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
1010 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
1011 	enable_irq(gmu->hfi_irq);
1012 
1013 	/* Set the GPU to the current freq */
1014 	a6xx_gmu_set_initial_freq(gpu, gmu);
1015 
1016 out:
1017 	/* On failure, shut down the GMU to leave it in a good state */
1018 	if (ret) {
1019 		disable_irq(gmu->gmu_irq);
1020 		a6xx_rpmh_stop(gmu);
1021 		pm_runtime_put(gmu->gxpd);
1022 		pm_runtime_put(gmu->dev);
1023 	}
1024 
1025 	return ret;
1026 }
1027 
1028 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
1029 {
1030 	u32 reg;
1031 
1032 	if (!gmu->initialized)
1033 		return true;
1034 
1035 	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
1036 
1037 	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
1038 		return false;
1039 
1040 	return true;
1041 }
1042 
1043 /* Gracefully try to shut down the GMU and by extension the GPU */
1044 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
1045 {
1046 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1047 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1048 	u32 val;
1049 
1050 	/*
1051 	 * The GMU may still be in slumber if the GPU never started, so check
1052 	 * and skip putting it back into slumber in that case
1053 	 */
1054 	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
1055 
1056 	if (val != 0xf) {
1057 		int ret = a6xx_gmu_wait_for_idle(gmu);
1058 
1059 		/* If the GMU isn't responding assume it is hung */
1060 		if (ret) {
1061 			a6xx_gmu_force_off(gmu);
1062 			return;
1063 		}
1064 
1065 		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
1066 
1067 		/* tell the GMU we want to slumber */
1068 		ret = a6xx_gmu_notify_slumber(gmu);
1069 		if (ret) {
1070 			a6xx_gmu_force_off(gmu);
1071 			return;
1072 		}
1073 
1074 		ret = gmu_poll_timeout(gmu,
1075 			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
1076 			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
1077 			100, 10000);
1078 
1079 		/*
1080 		 * Let the user know we failed to slumber but don't worry too
1081 		 * much because we are powering down anyway
1082 		 */
1083 
1084 		if (ret)
1085 			DRM_DEV_ERROR(gmu->dev,
1086 				"Unable to slumber GMU: status = 0x%x/0x%x\n",
1087 				gmu_read(gmu,
1088 					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
1089 				gmu_read(gmu,
1090 					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
1091 	}
1092 
1093 	/* Turn off HFI */
1094 	a6xx_hfi_stop(gmu);
1095 
1096 	/* Stop the interrupts and mask the hardware */
1097 	a6xx_gmu_irq_disable(gmu);
1098 
1099 	/* Tell RPMh to power off the GPU */
1100 	a6xx_rpmh_stop(gmu);
1101 }
1102 
1103 
1104 int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
1105 {
1106 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1107 	struct msm_gpu *gpu = &a6xx_gpu->base.base;
1108 
1109 	if (!pm_runtime_active(gmu->dev))
1110 		return 0;
1111 
1112 	/*
1113 	 * Force the GMU off if we detected a hang, otherwise try to shut it
1114 	 * down gracefully
1115 	 */
1116 	if (gmu->hung)
1117 		a6xx_gmu_force_off(gmu);
1118 	else
1119 		a6xx_gmu_shutdown(gmu);
1120 
1121 	/* Remove the bus vote */
1122 	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
1123 
1124 	/*
1125 	 * Make sure the GX domain is off before turning off the GMU (CX)
1126 	 * domain. Usually the GMU does this but only if the shutdown sequence
1127 	 * was successful
1128 	 */
1129 	if (!IS_ERR_OR_NULL(gmu->gxpd))
1130 		pm_runtime_put_sync(gmu->gxpd);
1131 
1132 	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
1133 
1134 	pm_runtime_put_sync(gmu->dev);
1135 
1136 	return 0;
1137 }
1138 
1139 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
1140 {
1141 	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
1142 	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
1143 	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
1144 	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
1145 	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
1146 	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
1147 
1148 	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
1149 	msm_gem_address_space_put(gmu->aspace);
1150 }
1151 
1152 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
1153 		size_t size, u64 iova, const char *name)
1154 {
1155 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1156 	struct drm_device *dev = a6xx_gpu->base.base.dev;
1157 	uint32_t flags = MSM_BO_WC;
1158 	u64 range_start, range_end;
1159 	int ret;
1160 
1161 	size = PAGE_ALIGN(size);
1162 	if (!iova) {
1163 		/* no fixed address - use GMU's uncached range */
1164 		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
1165 		range_end = 0x80000000;
1166 	} else {
1167 		/* range for fixed address */
1168 		range_start = iova;
1169 		range_end = iova + size;
1170 		/* use IOMMU_PRIV for icache/dcache */
1171 		flags |= MSM_BO_MAP_PRIV;
1172 	}
1173 
1174 	bo->obj = msm_gem_new(dev, size, flags);
1175 	if (IS_ERR(bo->obj))
1176 		return PTR_ERR(bo->obj);
1177 
1178 	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
1179 					     range_start, range_end);
1180 	if (ret) {
1181 		drm_gem_object_put(bo->obj);
1182 		return ret;
1183 	}
1184 
1185 	bo->virt = msm_gem_get_vaddr(bo->obj);
1186 	bo->size = size;
1187 
1188 	msm_gem_object_set_name(bo->obj, name);
1189 
1190 	return 0;
1191 }
1192 
1193 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
1194 {
1195 	struct msm_mmu *mmu;
1196 
1197 	mmu = msm_iommu_new(gmu->dev, 0);
1198 	if (!mmu)
1199 		return -ENODEV;
1200 	if (IS_ERR(mmu))
1201 		return PTR_ERR(mmu);
1202 
1203 	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
1204 	if (IS_ERR(gmu->aspace))
1205 		return PTR_ERR(gmu->aspace);
1206 
1207 	return 0;
1208 }
1209 
1210 /* Return the 'arc-level' for the given frequency */
1211 static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
1212 					   unsigned long freq)
1213 {
1214 	struct dev_pm_opp *opp;
1215 	unsigned int val;
1216 
1217 	if (!freq)
1218 		return 0;
1219 
1220 	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
1221 	if (IS_ERR(opp))
1222 		return 0;
1223 
1224 	val = dev_pm_opp_get_level(opp);
1225 
1226 	dev_pm_opp_put(opp);
1227 
1228 	return val;
1229 }
1230 
1231 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
1232 		unsigned long *freqs, int freqs_count, const char *id)
1233 {
1234 	int i, j;
1235 	const u16 *pri, *sec;
1236 	size_t pri_count, sec_count;
1237 
1238 	pri = cmd_db_read_aux_data(id, &pri_count);
1239 	if (IS_ERR(pri))
1240 		return PTR_ERR(pri);
1241 	/*
1242 	 * The data comes back as an array of unsigned shorts so adjust the
1243 	 * count accordingly
1244 	 */
1245 	pri_count >>= 1;
1246 	if (!pri_count)
1247 		return -EINVAL;
1248 
1249 	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
1250 	if (IS_ERR(sec))
1251 		return PTR_ERR(sec);
1252 
1253 	sec_count >>= 1;
1254 	if (!sec_count)
1255 		return -EINVAL;
1256 
1257 	/* Construct a vote for each frequency */
1258 	for (i = 0; i < freqs_count; i++) {
1259 		u8 pindex = 0, sindex = 0;
1260 		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
1261 
1262 		/* Get the primary index that matches the arc level */
1263 		for (j = 0; j < pri_count; j++) {
1264 			if (pri[j] >= level) {
1265 				pindex = j;
1266 				break;
1267 			}
1268 		}
1269 
1270 		if (j == pri_count) {
1271 			DRM_DEV_ERROR(dev,
1272 				      "Level %u not found in the RPMh list\n",
1273 				      level);
1274 			DRM_DEV_ERROR(dev, "Available levels:\n");
1275 			for (j = 0; j < pri_count; j++)
1276 				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);
1277 
1278 			return -EINVAL;
1279 		}
1280 
1281 		/*
1282 		 * Look for a level in the secondary list that matches. If
1283 		 * nothing fits, use the maximum non-zero vote
1284 		 */
1285 
1286 		for (j = 0; j < sec_count; j++) {
1287 			if (sec[j] >= level) {
1288 				sindex = j;
1289 				break;
1290 			} else if (sec[j]) {
1291 				sindex = j;
1292 			}
1293 		}
1294 
1295 		/* Construct the vote: [31:16] primary level value, [15:8] mx index, [7:0] primary index */
1296 		votes[i] = ((pri[pindex] & 0xffff) << 16) |
1297 			(sindex << 8) | pindex;
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 /*
1304  * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1305  * to construct the list of votes on the CPU and send it over. Query the RPMh
1306  * voltage levels and build the votes
1307  */
1308 
1309 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
1310 {
1311 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1312 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1313 	struct msm_gpu *gpu = &adreno_gpu->base;
1314 	int ret;
1315 
1316 	/* Build the GX votes */
1317 	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1318 		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
1319 
1320 	/* Build the CX votes */
1321 	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1322 		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
1323 
1324 	return ret;
1325 }
1326 
1327 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
1328 		u32 size)
1329 {
1330 	int count = dev_pm_opp_get_opp_count(dev);
1331 	struct dev_pm_opp *opp;
1332 	int i, index = 0;
1333 	unsigned long freq = 1;
1334 
1335 	/*
1336 	 * The OPP table doesn't contain the "off" frequency level so we need to
1337 	 * add 1 to the table size to account for it
1338 	 */
1339 
1340 	if (WARN(count + 1 > size,
1341 		"The GMU frequency table is being truncated\n"))
1342 		count = size - 1;
1343 
1344 	/* Set the "off" frequency */
1345 	freqs[index++] = 0;
1346 
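	/* Walk the OPP table from lowest to highest: dev_pm_opp_find_freq_ceil()
	 * rounds 'freq' up to the next available OPP and writes it back, and the
	 * post-increment pushes 'freq' past it so the next iteration finds the
	 * following level.
	 */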
1347 	for (i = 0; i < count; i++) {
1348 		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1349 		if (IS_ERR(opp))
1350 			break;
1351 
1352 		dev_pm_opp_put(opp);
1353 		freqs[index++] = freq++;
1354 	}
1355 
1356 	return index;
1357 }
1358 
1359 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1360 {
1361 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1362 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1363 	struct msm_gpu *gpu = &adreno_gpu->base;
1364 
1365 	int ret = 0;
1366 
1367 	/*
1368 	 * The GMU handles its own frequency switching so build a list of
1369 	 * available frequencies to send during initialization
1370 	 */
1371 	ret = devm_pm_opp_of_add_table(gmu->dev);
1372 	if (ret) {
1373 		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
1374 		return ret;
1375 	}
1376 
1377 	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1378 		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1379 
1380 	/*
1381 	 * The GMU also handles GPU frequency switching so build a list
1382 	 * from the GPU OPP table
1383 	 */
1384 	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1385 		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1386 
1387 	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
1388 
1389 	/* Build the list of RPMh votes that we'll send to the GMU */
1390 	return a6xx_gmu_rpmh_votes_init(gmu);
1391 }
1392 
1393 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1394 {
1395 	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
1396 
1397 	if (ret < 1)
1398 		return ret;
1399 
1400 	gmu->nr_clocks = ret;
1401 
1402 	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1403 		gmu->nr_clocks, "gmu");
1404 
1405 	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
1406 		gmu->nr_clocks, "hub");
1407 
1408 	return 0;
1409 }
1410 
1411 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1412 		const char *name)
1413 {
1414 	void __iomem *ret;
1415 	struct resource *res = platform_get_resource_byname(pdev,
1416 			IORESOURCE_MEM, name);
1417 
1418 	if (!res) {
1419 		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
1420 		return ERR_PTR(-EINVAL);
1421 	}
1422 
1423 	ret = ioremap(res->start, resource_size(res));
1424 	if (!ret) {
1425 		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
1426 		return ERR_PTR(-EINVAL);
1427 	}
1428 
1429 	return ret;
1430 }
1431 
1432 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1433 		const char *name, irq_handler_t handler)
1434 {
1435 	int irq, ret;
1436 
1437 	irq = platform_get_irq_byname(pdev, name);
1438 
1439 	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
1440 	if (ret) {
1441 		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
1442 			      name, ret);
1443 		return ret;
1444 	}
1445 
1446 	disable_irq(irq);
1447 
1448 	return irq;
1449 }
1450 
1451 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1452 {
1453 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1454 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1455 	struct platform_device *pdev = to_platform_device(gmu->dev);
1456 
1457 	mutex_lock(&gmu->lock);
1458 	if (!gmu->initialized) {
1459 		mutex_unlock(&gmu->lock);
1460 		return;
1461 	}
1462 
1463 	gmu->initialized = false;
1464 
1465 	mutex_unlock(&gmu->lock);
1466 
1467 	pm_runtime_force_suspend(gmu->dev);
1468 
1469 	/*
1470 	 * Since cxpd is a virt device, the devlink with gmu-dev will be removed
1471 	 * automatically when we do detach
1472 	 */
1473 	dev_pm_domain_detach(gmu->cxpd, false);
1474 
1475 	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
1476 		pm_runtime_disable(gmu->gxpd);
1477 		dev_pm_domain_detach(gmu->gxpd, false);
1478 	}
1479 
1480 	iounmap(gmu->mmio);
1481 	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1482 		iounmap(gmu->rscc);
1483 	gmu->mmio = NULL;
1484 	gmu->rscc = NULL;
1485 
1486 	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
1487 		a6xx_gmu_memory_free(gmu);
1488 
1489 		free_irq(gmu->gmu_irq, gmu);
1490 		free_irq(gmu->hfi_irq, gmu);
1491 	}
1492 
1493 	/* Drop reference taken in of_find_device_by_node */
1494 	put_device(gmu->dev);
1495 }
1496 
1497 static int cxpd_notifier_cb(struct notifier_block *nb,
1498 			unsigned long action, void *data)
1499 {
1500 	struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
1501 
1502 	if (action == GENPD_NOTIFY_OFF)
1503 		complete_all(&gmu->pd_gate);
1504 
1505 	return 0;
1506 }
1507 
1508 int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1509 {
1510 	struct platform_device *pdev = of_find_device_by_node(node);
1511 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1512 	int ret;
1513 
1514 	if (!pdev)
1515 		return -ENODEV;
1516 
1517 	gmu->dev = &pdev->dev;
1518 
1519 	of_dma_configure(gmu->dev, node, true);
1520 
1521 	pm_runtime_enable(gmu->dev);
1522 
1523 	/* Mark legacy for manual SPTPRAC control */
1524 	gmu->legacy = true;
1525 
1526 	/* Map the GMU registers */
1527 	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1528 	if (IS_ERR(gmu->mmio)) {
1529 		ret = PTR_ERR(gmu->mmio);
1530 		goto err_mmio;
1531 	}
1532 
1533 	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
1534 	if (IS_ERR(gmu->cxpd)) {
1535 		ret = PTR_ERR(gmu->cxpd);
1536 		goto err_mmio;
1537 	}
1538 
1539 	if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
1540 		ret = -ENODEV;
1541 		goto detach_cxpd;
1542 	}
1543 
1544 	init_completion(&gmu->pd_gate);
1545 	complete_all(&gmu->pd_gate);
1546 	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
1547 
1548 	/* Get a link to the GX power domain to reset the GPU */
1549 	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1550 	if (IS_ERR(gmu->gxpd)) {
1551 		ret = PTR_ERR(gmu->gxpd);
1552 		goto err_mmio;
1553 	}
1554 
1555 	gmu->initialized = true;
1556 
1557 	return 0;
1558 
1559 detach_cxpd:
1560 	dev_pm_domain_detach(gmu->cxpd, false);
1561 
1562 err_mmio:
1563 	iounmap(gmu->mmio);
1564 
1565 	/* Drop reference taken in of_find_device_by_node */
1566 	put_device(gmu->dev);
1567 
1568 	return ret;
1569 }
1570 
1571 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1572 {
1573 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1574 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1575 	struct platform_device *pdev = of_find_device_by_node(node);
1576 	int ret;
1577 
1578 	if (!pdev)
1579 		return -ENODEV;
1580 
1581 	gmu->dev = &pdev->dev;
1582 
1583 	of_dma_configure(gmu->dev, node, true);
1584 
1585 	/* For now, don't do anything fancy until we get our feet under us */
1586 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1587 
1588 	pm_runtime_enable(gmu->dev);
1589 
1590 	/* Get the list of clocks */
1591 	ret = a6xx_gmu_clocks_probe(gmu);
1592 	if (ret)
1593 		goto err_put_device;
1594 
1595 	ret = a6xx_gmu_memory_probe(gmu);
1596 	if (ret)
1597 		goto err_put_device;
1598 
1599 
1600 	/* A660 now requires handling "prealloc requests" in GMU firmware.
1601 	 * For now, just hardcode allocations based on the known firmware.
1602 	 * Note: there is no indication that these correspond to "dummy" or
1603 	 * "debug" regions, but this "guess" allows reusing these BOs which
1604 	 * are otherwise unused by a660.
1605 	 */
1606 	gmu->dummy.size = SZ_4K;
1607 	if (adreno_is_a660_family(adreno_gpu)) {
1608 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
1609 					    0x60400000, "debug");
1610 		if (ret)
1611 			goto err_memory;
1612 
1613 		gmu->dummy.size = SZ_8K;
1614 	}
1615 
1616 	/* Allocate memory for the GMU dummy page */
1617 	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
1618 				    0x60000000, "dummy");
1619 	if (ret)
1620 		goto err_memory;
1621 
1622 	/* Note that a650 family also includes a660 family: */
1623 	if (adreno_is_a650_family(adreno_gpu)) {
1624 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1625 			SZ_16M - SZ_16K, 0x04000, "icache");
1626 		if (ret)
1627 			goto err_memory;
1628 	/*
1629 	 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
1630 	 * to allocate icache/dcache here, as per downstream code flow, but it may not actually be
1631 	 * necessary. If you omit this step and you don't get random pagefaults, you are likely
1632 	 * good to go without this!
1633 	 */
1634 	} else if (adreno_is_a640_family(adreno_gpu)) {
1635 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1636 			SZ_256K - SZ_16K, 0x04000, "icache");
1637 		if (ret)
1638 			goto err_memory;
1639 
1640 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
1641 			SZ_256K - SZ_16K, 0x44000, "dcache");
1642 		if (ret)
1643 			goto err_memory;
1644 	} else if (adreno_is_a630_family(adreno_gpu)) {
1645 		/* HFI v1, has sptprac */
1646 		gmu->legacy = true;
1647 
1648 		/* Allocate memory for the GMU debug region */
1649 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
1650 		if (ret)
1651 			goto err_memory;
1652 	}
1653 
1654 	/* Allocate memory for the GMU log region */
1655 	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
1656 	if (ret)
1657 		goto err_memory;
1658 
1659 	/* Allocate memory for the HFI queues */
1660 	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
1661 	if (ret)
1662 		goto err_memory;
1663 
1664 	/* Map the GMU registers */
1665 	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1666 	if (IS_ERR(gmu->mmio)) {
1667 		ret = PTR_ERR(gmu->mmio);
1668 		goto err_memory;
1669 	}
1670 
1671 	if (adreno_is_a650_family(adreno_gpu)) {
1672 		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
1673 		if (IS_ERR(gmu->rscc)) {
1674 			ret = -ENODEV;
1675 			goto err_mmio;
1676 		}
1677 	} else {
1678 		gmu->rscc = gmu->mmio + 0x23000;
1679 	}
1680 
1681 	/* Get the HFI and GMU interrupts */
1682 	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1683 	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1684 
1685 	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
1686 		ret = -ENODEV;
1687 		goto err_mmio;
1688 	}
1689 
1690 	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
1691 	if (IS_ERR(gmu->cxpd)) {
1692 		ret = PTR_ERR(gmu->cxpd);
1693 		goto err_mmio;
1694 	}
1695 
1696 	if (!device_link_add(gmu->dev, gmu->cxpd,
1697 					DL_FLAG_PM_RUNTIME)) {
1698 		ret = -ENODEV;
1699 		goto detach_cxpd;
1700 	}
1701 
1702 	init_completion(&gmu->pd_gate);
1703 	complete_all(&gmu->pd_gate);
1704 	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
1705 
1706 	/*
1707 	 * Get a link to the GX power domain to reset the GPU in case of GMU
1708 	 * crash
1709 	 */
1710 	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1711 
1712 	/* Get the power levels for the GMU and GPU */
1713 	a6xx_gmu_pwrlevels_probe(gmu);
1714 
1715 	/* Set up the HFI queues */
1716 	a6xx_hfi_init(gmu);
1717 
1718 	/* Initialize RPMh */
1719 	a6xx_gmu_rpmh_init(gmu);
1720 
1721 	gmu->initialized = true;
1722 
1723 	return 0;
1724 
1725 detach_cxpd:
1726 	dev_pm_domain_detach(gmu->cxpd, false);
1727 
1728 err_mmio:
1729 	iounmap(gmu->mmio);
1730 	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1731 		iounmap(gmu->rscc);
1732 	free_irq(gmu->gmu_irq, gmu);
1733 	free_irq(gmu->hfi_irq, gmu);
1734 
1735 err_memory:
1736 	a6xx_gmu_memory_free(gmu);
1737 err_put_device:
1738 	/* Drop reference taken in of_find_device_by_node */
1739 	put_device(gmu->dev);
1740 
1741 	return ret;
1742 }
1743