1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
3 
4 #include <linux/clk.h>
5 #include <linux/iopoll.h>
6 #include <linux/pm_opp.h>
7 #include <soc/qcom/cmd-db.h>
8 
9 #include "a6xx_gpu.h"
10 #include "a6xx_gmu.xml.h"
11 
12 static irqreturn_t a6xx_gmu_irq(int irq, void *data)
13 {
14 	struct a6xx_gmu *gmu = data;
15 	u32 status;
16 
17 	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
18 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
19 
20 	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
21 		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
22 
23 		/* Temporary until we can recover safely */
24 		BUG();
25 	}
26 
27 	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
28 		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
29 
30 	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
31 		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
32 			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
33 
34 	return IRQ_HANDLED;
35 }
36 
37 static irqreturn_t a6xx_hfi_irq(int irq, void *data)
38 {
39 	struct a6xx_gmu *gmu = data;
40 	u32 status;
41 
42 	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
43 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
44 
45 	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
46 		tasklet_schedule(&gmu->hfi_tasklet);
47 
48 	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
49 		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
50 
51 		/* Temporary until we can recover safely */
52 		BUG();
53 	}
54 
55 	return IRQ_HANDLED;
56 }
57 
58 /* Check to see if the GX rail is still powered */
59 static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
60 {
61 	u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
62 
63 	return !(val &
64 		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
65 		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
66 }
67 
68 static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
69 {
70 	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
71 
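	/*
	 * The low bits below carry the index into the GX power level table
	 * that was sent to the GMU over HFI; the value placed in bits [31:28]
	 * is presumably a vote-type selector, but that field is not documented
	 * here, so treat this note as an assumption rather than fact.
	 */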
72 	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
73 		((3 & 0xf) << 28) | index);
74 
75 	/*
76 	 * Send an invalid index as a vote for the bus bandwidth and let the
77 	 * firmware decide on the right vote
78 	 */
79 	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
80 
81 	/* Set and clear the OOB for DCVS to trigger the GMU */
82 	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
83 	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
84 
85 	return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
86 }
87 
88 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
89 {
90 	u32 val;
91 	int local = gmu->idle_level;
92 
93 	/* SPTP and IFPC both report as IFPC */
94 	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
95 		local = GMU_IDLE_STATE_IFPC;
96 
97 	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
98 
99 	if (val == local) {
100 		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
101 			!a6xx_gmu_gx_is_on(gmu))
102 			return true;
103 	}
104 
105 	return false;
106 }
107 
108 /* Wait for the GMU to get to its most idle state */
109 int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
110 {
111 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
112 
113 	return spin_until(a6xx_gmu_check_idle_level(gmu));
114 }
115 
116 static int a6xx_gmu_start(struct a6xx_gmu *gmu)
117 {
118 	int ret;
119 	u32 val;
120 
121 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
122 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
123 
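	/*
	 * The GMU firmware writes the 0xbabeface magic to FW_INIT_RESULT once
	 * it has booted, so poll for it to confirm the CM3 core came up.
	 */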
124 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
125 		val == 0xbabeface, 100, 10000);
126 
127 	if (ret)
128 		dev_err(gmu->dev, "GMU firmware initialization timed out\n");
129 
130 	return ret;
131 }
132 
133 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
134 {
135 	u32 val;
136 	int ret;
137 
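	/* Clear the MSGQ bit in the mask register so HFI responses raise an IRQ */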
138 	gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
139 		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
140 
141 	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
142 
143 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
144 		val & 1, 100, 10000);
145 	if (ret)
146 		dev_err(gmu->dev, "Unable to start the HFI queues\n");
147 
148 	return ret;
149 }
150 
151 /* Trigger an OOB (out of band) request to the GMU */
152 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
153 {
154 	int ret;
155 	u32 val;
156 	int request, ack;
157 	const char *name;
158 
159 	switch (state) {
160 	case GMU_OOB_GPU_SET:
161 		request = GMU_OOB_GPU_SET_REQUEST;
162 		ack = GMU_OOB_GPU_SET_ACK;
163 		name = "GPU_SET";
164 		break;
165 	case GMU_OOB_BOOT_SLUMBER:
166 		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
167 		ack = GMU_OOB_BOOT_SLUMBER_ACK;
168 		name = "BOOT_SLUMBER";
169 		break;
170 	case GMU_OOB_DCVS_SET:
171 		request = GMU_OOB_DCVS_REQUEST;
172 		ack = GMU_OOB_DCVS_ACK;
173 		name = "GPU_DCVS";
174 		break;
175 	default:
176 		return -EINVAL;
177 	}
178 
179 	/* Trigger the requested OOB operation */
180 	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
181 
182 	/* Wait for the acknowledge interrupt */
183 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
184 		val & (1 << ack), 100, 10000);
185 
186 	if (ret)
187 		dev_err(gmu->dev,
188 			"Timeout waiting for GMU OOB set %s: 0x%x\n",
189 				name,
190 				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
191 
192 	/* Clear the acknowledge interrupt */
193 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
194 
195 	return ret;
196 }
197 
198 /* Clear a pending OOB state in the GMU */
199 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
200 {
201 	switch (state) {
202 	case GMU_OOB_GPU_SET:
203 		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
204 			1 << GMU_OOB_GPU_SET_CLEAR);
205 		break;
206 	case GMU_OOB_BOOT_SLUMBER:
207 		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
208 			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
209 		break;
210 	case GMU_OOB_DCVS_SET:
211 		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
212 			1 << GMU_OOB_DCVS_CLEAR);
213 		break;
214 	}
215 }
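
/*
 * A rough usage sketch (not taken from this file): a caller that needs the
 * GPU powered while it pokes GX registers would bracket the access with the
 * two helpers above, e.g.
 *
 *	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET);
 *	if (!ret) {
 *		... access GX registers ...
 *		a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);
 *	}
 */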
216 
217 /* Enable CPU control of SPTP power collapse */
218 static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
219 {
220 	int ret;
221 	u32 val;
222 
223 	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
224 
225 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
226 		(val & 0x38) == 0x28, 1, 100);
227 
228 	if (ret) {
229 		dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
230 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
231 	}
232 
233 	return ret;
234 }
235 
236 /* Disable CPU control of SPTP power collapse */
237 static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
238 {
239 	u32 val;
240 	int ret;
241 
242 	/* Make sure retention is on */
243 	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
244 
245 	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
246 
247 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
248 		(val & 0x04), 100, 10000);
249 
250 	if (ret)
251 		dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
252 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
253 }
254 
255 /* Let the GMU know we are starting a boot sequence */
256 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
257 {
258 	u32 vote;
259 
260 	/* Let the GMU know we are getting ready for boot */
261 	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
262 
263 	/* Choose the "default" power level as the highest available */
264 	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
265 
266 	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
267 	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
268 
269 	/* Let the GMU know the boot sequence has started */
270 	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
271 }
272 
273 /* Let the GMU know that we are about to go into slumber */
274 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
275 {
276 	int ret;
277 
278 	/* Disable the power counter so the GMU isn't busy */
279 	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
280 
281 	/* Disable SPTP_PC if the CPU is responsible for it */
282 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
283 		a6xx_sptprac_disable(gmu);
284 
285 	/* Tell the GMU to get ready to slumber */
286 	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
287 
288 	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
289 	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
290 
291 	if (!ret) {
292 		/* Check to see if the GMU really did slumber */
293 		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
294 			!= 0x0f) {
295 			dev_err(gmu->dev, "The GMU did not go into slumber\n");
296 			ret = -ETIMEDOUT;
297 		}
298 	}
299 
300 	/* Put fence into allow mode */
301 	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
302 	return ret;
303 }
304 
305 static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
306 {
307 	int ret;
308 	u32 val;
309 
310 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
311 	/* Wait for the register to finish posting */
312 	wmb();
313 
314 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
315 		val & (1 << 1), 100, 10000);
316 	if (ret) {
317 		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
318 		return ret;
319 	}
320 
321 	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
322 		!val, 100, 10000);
323 
324 	if (!ret) {
325 		gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
326 
327 		/* Re-enable the power counter */
328 		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
329 		return 0;
330 	}
331 
332 	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
333 	return ret;
334 }
335 
336 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
337 {
338 	int ret;
339 	u32 val;
340 
341 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
342 
343 	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
344 		val, val & (1 << 16), 100, 10000);
345 	if (ret)
346 		dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
347 
348 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
349 }
350 
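/*
 * The PDC offsets used below are register word indices, hence the << 2 to
 * turn them into a byte offset for the MMIO write.
 */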
351 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
352 {
353 	return msm_writel(value, ptr + (offset << 2));
354 }
355 
356 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
357 		const char *name);
358 
359 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
360 {
361 	struct platform_device *pdev = to_platform_device(gmu->dev);
362 	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
363 	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
364 
365 	if (IS_ERR(pdcptr) || IS_ERR(seqptr))
366 		goto err;
367 
368 	/* Disable SDE clock gating */
369 	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
370 
371 	/* Setup RSC PDC handshake for sleep and wakeup */
372 	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
373 	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
374 	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
375 	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
376 	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
377 	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
378 	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
379 	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
380 	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
381 	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
382 	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
383 
384 	/* Load RSC sequencer uCode for sleep and wakeup */
385 	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
386 	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
387 	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
388 	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
389 	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
390 
391 	/* Load PDC sequencer uCode for power up and power down sequence */
392 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
393 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
394 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
395 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
396 	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
397 
398 	/* Set TCS commands used by PDC sequence for low power modes */
399 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
400 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
401 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
402 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
403 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
404 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
405 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
406 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
407 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
408 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
409 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
410 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
411 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
412 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
413 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
414 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
415 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
416 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
417 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
418 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
419 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
420 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
421 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
422 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
423 
424 	/* Setup GPU PDC */
425 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
426 	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
427 
428 	/* ensure no writes happen before the uCode is fully written */
429 	wmb();
430 
431 err:
432 	if (!IS_ERR_OR_NULL(pdcptr))
		devm_iounmap(gmu->dev, pdcptr);
433 	if (!IS_ERR_OR_NULL(seqptr))
		devm_iounmap(gmu->dev, seqptr);
434 }
435 
436 /*
437  * The lowest 16 bits of this value are the number of XO clock cycles for main
438  * hysteresis which is set at 0x1680 cycles (300 us).  The higher 16 bits are
439  * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
440  */
441 
442 #define GMU_PWR_COL_HYST 0x000a1680
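
/*
 * Quick arithmetic check of the comment above (assuming the usual 19.2 MHz
 * XO on these SoCs): 0x1680 = 5760 cycles / 19.2 MHz = 300 us for the main
 * hysteresis, and 0xa = 10 cycles is roughly 0.5 us for the short one.
 */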
443 
444 /* Set up the idle state for the GMU */
445 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
446 {
447 	/* Disable GMU WB/RB buffer */
448 	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
449 
450 	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
451 
452 	switch (gmu->idle_level) {
453 	case GMU_IDLE_STATE_IFPC:
454 		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
455 			GMU_PWR_COL_HYST);
456 		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
457 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
458 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
459 		/* Fall through */
460 	case GMU_IDLE_STATE_SPTP:
461 		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
462 			GMU_PWR_COL_HYST);
463 		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
464 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
465 			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
466 	}
467 
468 	/* Enable RPMh GPU client */
469 	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
470 		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
471 		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
472 		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
473 		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
474 		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
475 		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
476 }
477 
478 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
479 {
480 	static bool rpmh_init;
481 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
482 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
483 	int i, ret;
484 	u32 chipid;
485 	u32 *image;
486 
487 	if (state == GMU_WARM_BOOT) {
488 		ret = a6xx_rpmh_start(gmu);
489 		if (ret)
490 			return ret;
491 	} else {
492 		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
493 			"GMU firmware is not loaded\n"))
494 			return -ENOENT;
495 
496 		/* Sanity check the size of the firmware that was loaded */
497 		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
498 			dev_err(gmu->dev,
499 				"GMU firmware is bigger than the available region\n");
500 			return -EINVAL;
501 		}
502 
503 		/* Turn on register retention */
504 		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
505 
506 		/* We only need to load the RPMh microcode once */
507 		if (!rpmh_init) {
508 			a6xx_gmu_rpmh_init(gmu);
509 			rpmh_init = true;
510 		} else if (state != GMU_RESET) {
511 			ret = a6xx_rpmh_start(gmu);
512 			if (ret)
513 				return ret;
514 		}
515 
516 		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
517 
518 		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
519 			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
520 				image[i]);
521 	}
522 
523 	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
524 	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
525 
526 	/* Write the iova of the HFI table */
527 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
528 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
529 
530 	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
531 		(1 << 31) | (0xa << 18) | (0xa0));
532 
533 	chipid = adreno_gpu->rev.core << 24;
534 	chipid |= adreno_gpu->rev.major << 16;
535 	chipid |= adreno_gpu->rev.minor << 12;
536 	chipid |= adreno_gpu->rev.patchid << 8;
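	/*
	 * For illustration only: a hypothetical rev 6.3.0.1 part would pack to
	 * 0x06030100 with this scheme.
	 */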
537 
538 	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
539 
540 	/* Set up the lowest idle level on the GMU */
541 	a6xx_gmu_power_config(gmu);
542 
543 	ret = a6xx_gmu_start(gmu);
544 	if (ret)
545 		return ret;
546 
547 	ret = a6xx_gmu_gfx_rail_on(gmu);
548 	if (ret)
549 		return ret;
550 
551 	/* Enable SPTP_PC if the CPU is responsible for it */
552 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
553 		ret = a6xx_sptprac_enable(gmu);
554 		if (ret)
555 			return ret;
556 	}
557 
558 	ret = a6xx_gmu_hfi_start(gmu);
559 	if (ret)
560 		return ret;
561 
562 	/* FIXME: Do we need this wmb() here? */
563 	wmb();
564 
565 	return 0;
566 }
567 
568 #define A6XX_HFI_IRQ_MASK \
569 	(A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
570 	 A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
571 
572 #define A6XX_GMU_IRQ_MASK \
573 	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
574 	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
575 	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
576 
577 static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
578 {
579 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
580 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
581 
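	/*
	 * A set bit in these mask registers disables the source (the disable
	 * path below writes ~0), so ~MASK leaves only our interrupts enabled.
	 */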
582 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
583 		~A6XX_GMU_IRQ_MASK);
584 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
585 		~A6XX_HFI_IRQ_MASK);
586 
587 	enable_irq(gmu->gmu_irq);
588 	enable_irq(gmu->hfi_irq);
589 }
590 
591 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
592 {
593 	disable_irq(gmu->gmu_irq);
594 	disable_irq(gmu->hfi_irq);
595 
596 	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
597 	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
598 }
599 
600 int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
601 {
602 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
603 	int ret;
604 	u32 val;
605 
606 	/* Flush all the queues */
607 	a6xx_hfi_stop(gmu);
608 
609 	/* Stop the interrupts */
610 	a6xx_gmu_irq_disable(gmu);
611 
612 	/* Force off SPTP in case the GMU is managing it */
613 	a6xx_sptprac_disable(gmu);
614 
615 	/* Make sure there are no outstanding RPMh votes */
616 	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
617 		(val & 1), 100, 10000);
618 	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
619 		(val & 1), 100, 10000);
620 	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
621 		(val & 1), 100, 10000);
622 	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
623 		(val & 1), 100, 1000);
624 
625 	/* Force off the GX GDSC */
626 	regulator_force_disable(gmu->gx);
627 
628 	/* Disable the resources */
629 	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
630 	pm_runtime_put_sync(gmu->dev);
631 
632 	/* Re-enable the resources */
633 	pm_runtime_get_sync(gmu->dev);
634 
635 	/* Use a known rate to bring up the GMU */
636 	clk_set_rate(gmu->core_clk, 200000000);
637 	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
638 	if (ret)
639 		goto out;
640 
641 	a6xx_gmu_irq_enable(gmu);
642 
643 	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
644 	if (!ret)
645 		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
646 
647 	/* Set the GPU back to the highest power frequency */
648 	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
649 
650 out:
651 	if (ret)
652 		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
653 
654 	return ret;
655 }
656 
657 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
658 {
659 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
660 	int status, ret;
661 
662 	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
663 		return 0;
664 
665 	/* Turn on the resources */
666 	pm_runtime_get_sync(gmu->dev);
667 
668 	/* Use a known rate to bring up the GMU */
669 	clk_set_rate(gmu->core_clk, 200000000);
670 	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
671 	if (ret)
672 		goto out;
673 
674 	a6xx_gmu_irq_enable(gmu);
675 
676 	/* Check to see if we are doing a cold or warm boot */
677 	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
678 		GMU_WARM_BOOT : GMU_COLD_BOOT;
679 
680 	ret = a6xx_gmu_fw_start(gmu, status);
681 	if (ret)
682 		goto out;
683 
684 	ret = a6xx_hfi_start(gmu, status);
685 
686 	/* Set the GPU to the highest power frequency */
687 	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
688 
689 out:
690 	/* Make sure to turn off the boot OOB request on error */
691 	if (ret)
692 		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
693 
694 	return ret;
695 }
696 
697 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
698 {
699 	u32 reg;
700 
701 	if (!gmu->mmio)
702 		return true;
703 
704 	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
705 
706 	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
707 		return false;
708 
709 	return true;
710 }
711 
712 int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
713 {
714 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
715 	u32 val;
716 
717 	/*
718 	 * The GMU may still be in slumber if the GPU never started; 0xf here is
719 	 * the slumber state, so skip putting it back into slumber in that case
720 	 */
721 	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
722 
723 	if (val != 0xf) {
724 		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
725 
726 		/* Temporary until we can recover safely */
727 		BUG_ON(ret);
728 
729 		/* tell the GMU we want to slumber */
730 		a6xx_gmu_notify_slumber(gmu);
731 
732 		ret = gmu_poll_timeout(gmu,
733 			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
734 			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
735 			100, 10000);
736 
737 		/*
738 		 * Let the user know we failed to slumber but don't worry too
739 		 * much because we are powering down anyway
740 		 */
741 
742 		if (ret)
743 			dev_err(gmu->dev,
744 				"Unable to slumber GMU: status = 0x%x/0x%x\n",
745 				gmu_read(gmu,
746 					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
747 				gmu_read(gmu,
748 					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
749 	}
750 
751 	/* Turn off HFI */
752 	a6xx_hfi_stop(gmu);
753 
754 	/* Stop the interrupts and mask the hardware */
755 	a6xx_gmu_irq_disable(gmu);
756 
757 	/* Tell RPMh to power off the GPU */
758 	a6xx_rpmh_stop(gmu);
759 
760 	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
761 
762 	pm_runtime_put_sync(gmu->dev);
763 
764 	return 0;
765 }
766 
767 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
768 {
769 	int count, i;
770 	u64 iova;
771 
772 	if (IS_ERR_OR_NULL(bo))
773 		return;
774 
775 	count = bo->size >> PAGE_SHIFT;
776 	iova = bo->iova;
777 
778 	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
779 		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
780 		__free_pages(bo->pages[i], 0);
781 	}
782 
783 	kfree(bo->pages);
784 	kfree(bo);
785 }
786 
787 static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
788 		size_t size)
789 {
790 	struct a6xx_gmu_bo *bo;
791 	int ret, count, i;
792 
793 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
794 	if (!bo)
795 		return ERR_PTR(-ENOMEM);
796 
797 	bo->size = PAGE_ALIGN(size);
798 
799 	count = bo->size >> PAGE_SHIFT;
800 
801 	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
802 	if (!bo->pages) {
803 		kfree(bo);
804 		return ERR_PTR(-ENOMEM);
805 	}
806 
807 	for (i = 0; i < count; i++) {
808 		bo->pages[i] = alloc_page(GFP_KERNEL);
809 		if (!bo->pages[i])
810 			goto err;
811 	}
812 
813 	bo->iova = gmu->uncached_iova_base;
814 
815 	for (i = 0; i < count; i++) {
816 		ret = iommu_map(gmu->domain,
817 			bo->iova + (PAGE_SIZE * i),
818 			page_to_phys(bo->pages[i]), PAGE_SIZE,
819 			IOMMU_READ | IOMMU_WRITE);
820 
821 		if (ret) {
822 			dev_err(gmu->dev, "Unable to map GMU buffer object\n");
823 
824 			for (i = i - 1 ; i >= 0; i--)
825 				iommu_unmap(gmu->domain,
826 					bo->iova + (PAGE_SIZE * i),
827 					PAGE_SIZE);
828 
829 			goto err;
830 		}
831 	}
832 
833 	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
834 		pgprot_writecombine(PAGE_KERNEL));
835 	if (!bo->virt)
836 		goto err;
837 
838 	/* Align future IOVA addresses on 1MB boundaries */
839 	gmu->uncached_iova_base += ALIGN(size, SZ_1M);
840 
841 	return bo;
842 
843 err:
844 	for (i = 0; i < count; i++) {
845 		if (bo->pages[i])
846 			__free_pages(bo->pages[i], 0);
847 	}
848 
849 	kfree(bo->pages);
850 	kfree(bo);
851 
852 	return ERR_PTR(-ENOMEM);
853 }
854 
855 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
856 {
857 	int ret;
858 
859 	/*
860 	 * The GMU address space is hardcoded to treat the range
861 	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
862 	 * between the GMU and the CPU will live in this space
863 	 */
864 	gmu->uncached_iova_base = 0x60000000;
865 
866 
867 	gmu->domain = iommu_domain_alloc(&platform_bus_type);
868 	if (!gmu->domain)
869 		return -ENODEV;
870 
871 	ret = iommu_attach_device(gmu->domain, gmu->dev);
872 
873 	if (ret) {
874 		iommu_domain_free(gmu->domain);
875 		gmu->domain = NULL;
876 	}
877 
878 	return ret;
879 }
880 
881 /* Get the list of RPMh voltage levels from cmd-db */
882 static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
883 {
884 	u32 len = cmd_db_read_aux_data_len(id);
885 
886 	if (!len)
887 		return 0;
888 
889 	if (WARN_ON(len > size))
890 		return -EINVAL;
891 
892 	cmd_db_read_aux_data(id, vals, len);
893 
894 	/*
895 	 * The data comes back as an array of unsigned shorts so adjust the
896 	 * count accordingly
897 	 */
898 	return len >> 1;
899 }
900 
901 /* Return the 'arc-level' for the given frequency */
902 static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
903 {
904 	struct dev_pm_opp *opp;
905 	struct device_node *np;
906 	u32 val = 0;
907 
908 	if (!freq)
909 		return 0;
910 
911 	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
912 	if (IS_ERR(opp))
913 		return 0;
914 
915 	np = dev_pm_opp_get_of_node(opp);
916 
917 	if (np) {
918 		of_property_read_u32(np, "qcom,level", &val);
919 		of_node_put(np);
920 	}
921 
922 	dev_pm_opp_put(opp);
923 
924 	return val;
925 }
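
/*
 * Illustrative only (not from a real devicetree): the OPP entries consumed
 * above are expected to carry the RPMh level in a "qcom,level" property,
 * along the lines of:
 *
 *	opp-430000000 {
 *		opp-hz = /bits/ 64 <430000000>;
 *		qcom,level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
 *	};
 */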
926 
927 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
928 		unsigned long *freqs, int freqs_count,
929 		u16 *pri, int pri_count,
930 		u16 *sec, int sec_count)
931 {
932 	int i, j;
933 
934 	/* Construct a vote for each frequency */
935 	for (i = 0; i < freqs_count; i++) {
936 		u8 pindex = 0, sindex = 0;
937 		u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
938 
939 		/* Get the primary index that matches the arc level */
940 		for (j = 0; j < pri_count; j++) {
941 			if (pri[j] >= level) {
942 				pindex = j;
943 				break;
944 			}
945 		}
946 
947 		if (j == pri_count) {
948 			dev_err(dev,
949 				"Level %u not found in the RPMh list\n",
950 					level);
951 			dev_err(dev, "Available levels:\n");
952 			for (j = 0; j < pri_count; j++)
953 				dev_err(dev, "  %u\n", pri[j]);
954 
955 			return -EINVAL;
956 		}
957 
958 		/*
959 		 * Look for a level in the secondary list that matches. If nothing
960 		 * fits, use the maximum non-zero vote
961 		 */
962 
963 		for (j = 0; j < sec_count; j++) {
964 			if (sec[j] >= level) {
965 				sindex = j;
966 				break;
967 			} else if (sec[j]) {
968 				sindex = j;
969 			}
970 		}
971 
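		/*
		 * The vote packs the primary arc level into bits [31:16], the
		 * secondary table index into [15:8] and the primary index into
		 * [7:0]; e.g. (hypothetical values) pri[2] == 0x80 with
		 * sindex == 1 would yield 0x00800102.
		 */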
972 		/* Construct the vote */
973 		votes[i] = ((pri[pindex] & 0xffff) << 16) |
974 			(sindex << 8) | pindex;
975 	}
976 
977 	return 0;
978 }
979 
980 /*
981  * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
982  * to construct the list of votes on the CPU and send it over. Query the RPMh
983  * voltage levels and build the votes
984  */
985 
986 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
987 {
988 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
989 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
990 	struct msm_gpu *gpu = &adreno_gpu->base;
991 
992 	u16 gx[16], cx[16], mx[16];
993 	u32 gxcount, cxcount, mxcount;
994 	int ret;
995 
996 	/* Get the list of available voltage levels for each component */
997 	gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
998 	cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
999 	mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
1000 
1001 	/* Build the GX votes */
1002 	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1003 		gmu->gpu_freqs, gmu->nr_gpu_freqs,
1004 		gx, gxcount, mx, mxcount);
1005 
1006 	/* Build the CX votes */
1007 	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1008 		gmu->gmu_freqs, gmu->nr_gmu_freqs,
1009 		cx, cxcount, mx, mxcount);
1010 
1011 	return ret;
1012 }
1013 
1014 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
1015 		u32 size)
1016 {
1017 	int count = dev_pm_opp_get_opp_count(dev);
1018 	struct dev_pm_opp *opp;
1019 	int i, index = 0;
1020 	unsigned long freq = 1;
1021 
1022 	/*
1023 	 * The OPP table doesn't contain the "off" frequency level so we need to
1024 	 * add 1 to the table size to account for it
1025 	 */
1026 
1027 	if (WARN(count + 1 > size,
1028 		"The GMU frequency table is being truncated\n"))
1029 		count = size - 1;
1030 
1031 	/* Set the "off" frequency */
1032 	freqs[index++] = 0;
1033 
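	/*
	 * dev_pm_opp_find_freq_ceil() rounds 'freq' up to the next available
	 * OPP and writes that rate back, so bumping it by 1 Hz each pass walks
	 * the table in ascending order.
	 */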
1034 	for (i = 0; i < count; i++) {
1035 		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1036 		if (IS_ERR(opp))
1037 			break;
1038 
1039 		dev_pm_opp_put(opp);
1040 		freqs[index++] = freq++;
1041 	}
1042 
1043 	return index;
1044 }
1045 
1046 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1047 {
1048 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1049 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1050 	struct msm_gpu *gpu = &adreno_gpu->base;
1051 
1052 	int ret = 0;
1053 
1054 	/*
1055 	 * The GMU handles its own frequency switching so build a list of
1056 	 * available frequencies to send during initialization
1057 	 */
1058 	ret = dev_pm_opp_of_add_table(gmu->dev);
1059 	if (ret) {
1060 		dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
1061 		return ret;
1062 	}
1063 
1064 	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1065 		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1066 
1067 	/*
1068 	 * The GMU also handles GPU frequency switching so build a list
1069 	 * from the GPU OPP table
1070 	 */
1071 	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1072 		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1073 
1074 	/* Build the list of RPMh votes that we'll send to the GMU */
1075 	return a6xx_gmu_rpmh_votes_init(gmu);
1076 }
1077 
1078 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1079 {
1080 	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
1081 
1082 	if (ret < 1)
1083 		return ret;
1084 
1085 	gmu->nr_clocks = ret;
1086 
1087 	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1088 		gmu->nr_clocks, "gmu");
1089 
1090 	return 0;
1091 }
1092 
1093 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1094 		const char *name)
1095 {
1096 	void __iomem *ret;
1097 	struct resource *res = platform_get_resource_byname(pdev,
1098 			IORESOURCE_MEM, name);
1099 
1100 	if (!res) {
1101 		dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
1102 		return ERR_PTR(-EINVAL);
1103 	}
1104 
1105 	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1106 	if (!ret) {
1107 		dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
1108 		return ERR_PTR(-EINVAL);
1109 	}
1110 
1111 	return ret;
1112 }
1113 
1114 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1115 		const char *name, irq_handler_t handler)
1116 {
1117 	int irq, ret;
1118 
1119 	irq = platform_get_irq_byname(pdev, name);
1120 
1121 	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
1122 		name, gmu);
1123 	if (ret) {
1124 		dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
1125 		return ret;
1126 	}
1127 
1128 	disable_irq(irq);
1129 
1130 	return irq;
1131 }
1132 
1133 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1134 {
1135 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1136 
1137 	if (IS_ERR_OR_NULL(gmu->mmio))
1138 		return;
1139 
1140 	pm_runtime_disable(gmu->dev);
1141 	a6xx_gmu_stop(a6xx_gpu);
1142 
1143 	a6xx_gmu_irq_disable(gmu);
1144 	a6xx_gmu_memory_free(gmu, gmu->hfi);
1145 
1146 	iommu_detach_device(gmu->domain, gmu->dev);
1147 
1148 	iommu_domain_free(gmu->domain);
1149 }
1150 
1151 int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1152 {
1153 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1154 	struct platform_device *pdev = of_find_device_by_node(node);
1155 	int ret;
1156 
1157 	if (!pdev)
1158 		return -ENODEV;
1159 
1160 	gmu->dev = &pdev->dev;
1161 
1162 	of_dma_configure(gmu->dev, node, true);
1163 
1164 	/* For now, don't do anything fancy until we get our feet under us */
1165 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1166 
1167 	pm_runtime_enable(gmu->dev);
1168 	gmu->gx = devm_regulator_get(gmu->dev, "vdd");
1169 
1170 	/* Get the list of clocks */
1171 	ret = a6xx_gmu_clocks_probe(gmu);
1172 	if (ret)
1173 		return ret;
1174 
1175 	/* Set up the IOMMU context bank */
1176 	ret = a6xx_gmu_memory_probe(gmu);
1177 	if (ret)
1178 		return ret;
1179 
1180 	/* Allocate memory for the HFI queues */
1181 	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1182 	if (IS_ERR(gmu->hfi))
1183 		goto err;
1184 
1185 	/* Allocate memory for the GMU debug region */
1186 	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1187 	if (IS_ERR(gmu->debug))
1188 		goto err;
1189 
1190 	/* Map the GMU registers */
1191 	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1192 	if (IS_ERR(gmu->mmio))
1193 		goto err;
1194 
1195 	/* Get the HFI and GMU interrupts */
1196 	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1197 	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1198 
1199 	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
1200 		goto err;
1201 
1202 	/* Set up a tasklet to handle GMU HFI responses */
1203 	tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
1204 
1205 	/* Get the power levels for the GMU and GPU */
1206 	a6xx_gmu_pwrlevels_probe(gmu);
1207 
1208 	/* Set up the HFI queues */
1209 	a6xx_hfi_init(gmu);
1210 
1211 	return 0;
1212 err:
1213 	a6xx_gmu_memory_free(gmu, gmu->hfi);
1214 
1215 	if (gmu->domain) {
1216 		iommu_detach_device(gmu->domain, gmu->dev);
1217 
1218 		iommu_domain_free(gmu->domain);
1219 	}
1220 
1221 	return -ENODEV;
1222 }
1223