xref: /openbmc/linux/drivers/gpu/drm/msm/adreno/a5xx_gpu.c (revision cbd26fc9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/cpumask.h>
8 #include <linux/firmware/qcom/qcom_scm.h>
9 #include <linux/pm_opp.h>
10 #include <linux/nvmem-consumer.h>
11 #include <linux/slab.h>
12 #include "msm_gem.h"
13 #include "msm_mmu.h"
14 #include "a5xx_gpu.h"
15 
16 extern bool hang_debug;
17 static void a5xx_dump(struct msm_gpu *gpu);
18 
/* PAS id handed to SCM for the GPU zap shader (see a5xx_zap_shader_*) */
#define GPU_PAS_ID 13
20 
update_shadow_rptr(struct msm_gpu * gpu,struct msm_ringbuffer * ring)21 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
22 {
23 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
24 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
25 
26 	if (a5xx_gpu->has_whereami) {
27 		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
28 		OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
29 		OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
30 	}
31 }
32 
/*
 * Kick the ring: publish the software write pointer to the hardware so the
 * CP starts consuming newly written commands.
 *
 * @sync: when true, first emit a CP_WHERE_AM_I so the rptr shadow is brought
 *        up to date before anyone looks at ring state again.
 *
 * The WPTR register is only written when this ring is the one currently
 * executing and no preemption is in flight; otherwise the preemption code
 * is responsible for the wptr when the ring is switched in.
 */
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		bool sync)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	uint32_t wptr;
	unsigned long flags;

	/*
	 * Most flush operations need to issue a WHERE_AM_I opcode to sync up
	 * the rptr shadow
	 */
	if (sync)
		update_shadow_rptr(gpu, ring);

	spin_lock_irqsave(&ring->preempt_lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->preempt_lock, flags);

	/* Make sure everything is posted before making a decision */
	mb();

	/* Update HW if this is the current ring and we are not in preempt */
	if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
		gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
65 
a5xx_submit_in_rb(struct msm_gpu * gpu,struct msm_gem_submit * submit)66 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
67 {
68 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
69 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
70 	struct msm_ringbuffer *ring = submit->ring;
71 	struct drm_gem_object *obj;
72 	uint32_t *ptr, dwords;
73 	unsigned int i;
74 
75 	for (i = 0; i < submit->nr_cmds; i++) {
76 		switch (submit->cmd[i].type) {
77 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
78 			break;
79 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
80 			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
81 				break;
82 			fallthrough;
83 		case MSM_SUBMIT_CMD_BUF:
84 			/* copy commands into RB: */
85 			obj = submit->bos[submit->cmd[i].idx].obj;
86 			dwords = submit->cmd[i].size;
87 
88 			ptr = msm_gem_get_vaddr(obj);
89 
90 			/* _get_vaddr() shouldn't fail at this point,
91 			 * since we've already mapped it once in
92 			 * submit_reloc()
93 			 */
94 			if (WARN_ON(IS_ERR_OR_NULL(ptr)))
95 				return;
96 
97 			for (i = 0; i < dwords; i++) {
98 				/* normally the OUT_PKTn() would wait
99 				 * for space for the packet.  But since
100 				 * we just OUT_RING() the whole thing,
101 				 * need to call adreno_wait_ring()
102 				 * ourself:
103 				 */
104 				adreno_wait_ring(ring, 1);
105 				OUT_RING(ring, ptr[i]);
106 			}
107 
108 			msm_gem_put_vaddr(obj);
109 
110 			break;
111 		}
112 	}
113 
114 	a5xx_gpu->last_seqno[ring->id] = submit->seqno;
115 	a5xx_flush(gpu, ring, true);
116 	a5xx_preempt_trigger(gpu);
117 
118 	/* we might not necessarily have a cmd from userspace to
119 	 * trigger an event to know that submit has completed, so
120 	 * do this manually:
121 	 */
122 	a5xx_idle(gpu, ring);
123 	ring->memptrs->fence = submit->seqno;
124 	msm_gpu_retire(gpu);
125 }
126 
/*
 * Write a submit into the ringbuffer, bracketed by preemption bookkeeping:
 * save-record address, yield enables, the IBs themselves, the fence write,
 * a CACHE_FLUSH_TS event (raises the completion IRQ) and a final
 * CONTEXT_SWITCH_YIELD.  Then kick the ring and check whether preemption
 * should be triggered.
 */
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i, ibs = 0;

	/* "sudo" submits bypass the IB path and are copied straight into the RB */
	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
		gpu->cur_ctx_seqno = 0;
		a5xx_submit_in_rb(gpu, submit);
		return;
	}

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x02);

	/* Turn off protected mode to write to special registers */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Set the save preemption record for the ring/command */
	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	/*
	 * Disable local preemption by default because it requires
	 * user-space to be aware of it and provide additional handling
	 * to restore rendering state or do various flushes on switch.
	 */
	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x0);

	/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
	OUT_RING(ring, 0x02);

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* Skip the restore buf if this context is already current */
			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}

		/*
		 * Periodically update shadow-wptr if needed, so that we
		 * can see partial progress of submits with large # of
		 * cmds.. otherwise we could needlessly stall waiting for
		 * ringbuffer state, simply due to looking at a shadow
		 * rptr value that has not been updated
		 */
		if ((ibs % 32) == 0)
			update_shadow_rptr(gpu, ring);
	}

	/*
	 * Write the render mode to NULL (0) to indicate to the CP that the IBs
	 * are done rendering - otherwise a lucky preemption would start
	 * replaying from the last checkpoint
	 */
	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);
	OUT_RING(ring, 0);

	/* Turn off IB level preemptions */
	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
	OUT_RING(ring, 0x01);

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);
	a5xx_gpu->last_seqno[ring->id] = submit->seqno;

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to the memory and then triggers the interrupt
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
		CP_EVENT_WRITE_0_IRQ);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	/* Yield the floor on command completion */
	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
	/*
	 * If dword[2:1] are non zero, they specify an address for the CP to
	 * write the value of dword[3] to on preemption complete. Write 0 to
	 * skip the write
	 */
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x00);
	/* Data value - not used if the address above is 0 */
	OUT_RING(ring, 0x01);
	/* Set bit 0 to trigger an interrupt on preempt complete */
	OUT_RING(ring, 0x01);

	/* A WHERE_AM_I packet is not needed after a YIELD */
	a5xx_flush(gpu, ring, false);

	/* Check to see if we need to start preemption */
	a5xx_preempt_trigger(gpu);
}
249 
/*
 * Hardware clock-gating (HWCG) register/value tables, selected per target
 * in a5xx_set_hwcg():
 *  - a5xx_hwcg: the default table (4 SP/TP/RB/CCU instances)
 *  - a50x_hwcg: a506/a508 (single SP/TP/RB/CCU)
 *  - a512_hwcg: a509/a512 (two SP/TP/RB/CCU)
 */
static const struct adreno_five_hwcg_regs {
	u32 offset;
	u32 value;
} a5xx_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
}, a50x_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
}, a512_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
};
443 
/*
 * Enable (@state true) or disable (@state false) hardware clock gating.
 * Selects the per-target register table, writes either the table values
 * or zeroes, and programs the matching RBBM_CLOCK_CNTL / ISDB_CNT
 * settings (a540 additionally gets its GPMU delay/hyst registers).
 */
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	const struct adreno_five_hwcg_regs *regs = a5xx_hwcg;
	unsigned int count = ARRAY_SIZE(a5xx_hwcg);
	unsigned int i;

	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
		regs = a50x_hwcg;
		count = ARRAY_SIZE(a50x_hwcg);
	} else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
		regs = a512_hwcg;
		count = ARRAY_SIZE(a512_hwcg);
	}

	for (i = 0; i < count; i++) {
		u32 val = state ? regs[i].value : 0;

		gpu_write(gpu, regs[i].offset, val);
	}

	/* a540 has additional GPMU clock gating registers */
	if (adreno_is_a540(adreno_gpu)) {
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
	}

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
473 
/*
 * Run the one-time CP_ME_INIT sequence on ring 0 to bring up the CP
 * microcode (hardware contexts, error detection, per-target ucode
 * workaround flags), then wait for the GPU to go idle.
 *
 * Returns 0 on success, -EINVAL if the GPU did not idle afterwards.
 */
static int a5xx_me_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002F);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Specify workarounds for various microcode issues */
	if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
		/* Workaround for token end syncs
		 * Force a WFI after every direct-render 3D mode draw and every
		 * 2D mode 3 draw
		 */
		OUT_RING(ring, 0x0000000B);
	} else if (adreno_is_a510(adreno_gpu)) {
		/* Workaround for token and syncs */
		OUT_RING(ring, 0x00000001);
	} else {
		/* No workarounds enabled */
		OUT_RING(ring, 0x00000000);
	}

	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Flush with sync so the rptr shadow is updated before we idle-wait */
	a5xx_flush(gpu, ring, true);
	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
514 
/*
 * Prime preemption state: point the CP at ring 0's save record and emit
 * an initial CONTEXT_SWITCH_YIELD so later preempt triggers start from a
 * valid state.  No-op when only one ring is configured (no preemption).
 *
 * Returns 0 on success, -EINVAL if the GPU did not idle afterwards.
 */
static int a5xx_preempt_start(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	if (gpu->nr_rings == 1)
		return 0;

	/* Turn off protected mode to write to special registers */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Set the save preemption record for the ring/command */
	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x00);

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x01);

	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
	OUT_RING(ring, 0x01);

	/* Yield the floor on command completion */
	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x01);
	OUT_RING(ring, 0x01);

	/* The WHERE_AM_I packet is not needed after a YIELD is issued */
	a5xx_flush(gpu, ring, false);

	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
558 
/*
 * Inspect the loaded PFP microcode image to decide whether it supports
 * the CP_WHERE_AM_I packet (required for the rptr shadow).  Sets
 * a5xx_gpu->has_whereami on success; leaves it untouched otherwise.
 */
static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
		struct drm_gem_object *obj)
{
	u32 *buf = msm_gem_get_vaddr(obj);

	if (IS_ERR(buf))
		return;

	/*
	 * If the lowest nibble is 0xa that is an indication that this microcode
	 * has been patched. The actual version is in dword [3] but we only care
	 * about the patchlevel which is the lowest nibble of dword [3]
	 *
	 * NOTE(review): the comment says dword [3] but the code reads buf[2] —
	 * presumably the same dword counted 1-based; confirm against the
	 * firmware header layout.
	 */
	if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
		a5xx_gpu->has_whereami = true;

	msm_gem_put_vaddr(obj);
}
577 
/*
 * Create the PM4 and PFP firmware BOs on first call (idempotent on
 * later calls), probe the PFP ucode for WHERE_AM_I support, and either
 * allocate the per-ring rptr shadow buffer or — when the microcode
 * can't support it — disable preemption by dropping to one ring.
 *
 * Returns 0 on success or a negative errno.
 */
static int a5xx_ucode_load(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	int ret;

	if (!a5xx_gpu->pm4_bo) {
		a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);


		if (IS_ERR(a5xx_gpu->pm4_bo)) {
			ret = PTR_ERR(a5xx_gpu->pm4_bo);
			a5xx_gpu->pm4_bo = NULL;
			DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
				ret);
			return ret;
		}

		msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
	}

	if (!a5xx_gpu->pfp_bo) {
		a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);

		if (IS_ERR(a5xx_gpu->pfp_bo)) {
			ret = PTR_ERR(a5xx_gpu->pfp_bo);
			a5xx_gpu->pfp_bo = NULL;
			DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
				ret);
			return ret;
		}

		msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
		a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->has_whereami) {
		if (!a5xx_gpu->shadow_bo) {
			/* One shadow rptr slot per ring */
			a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
				sizeof(u32) * gpu->nr_rings,
				MSM_BO_WC | MSM_BO_MAP_PRIV,
				gpu->aspace, &a5xx_gpu->shadow_bo,
				&a5xx_gpu->shadow_iova);

			if (IS_ERR(a5xx_gpu->shadow))
				return PTR_ERR(a5xx_gpu->shadow);

			msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
		}
	} else if (gpu->nr_rings > 1) {
		/* Disable preemption if WHERE_AM_I isn't available */
		a5xx_preempt_fini(gpu);
		gpu->nr_rings = 1;
	}

	return 0;
}
637 
/* Remote-state value requesting a zap shader resume via qcom_scm_set_remote_state() */
#define SCM_GPU_ZAP_SHADER_RESUME 0
639 
a5xx_zap_shader_resume(struct msm_gpu * gpu)640 static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
641 {
642 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
643 	int ret;
644 
645 	/*
646 	 * Adreno 506 have CPZ Retention feature and doesn't require
647 	 * to resume zap shader
648 	 */
649 	if (adreno_is_a506(adreno_gpu))
650 		return 0;
651 
652 	ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
653 	if (ret)
654 		DRM_ERROR("%s: zap-shader resume failed: %d\n",
655 			gpu->name, ret);
656 
657 	return ret;
658 }
659 
a5xx_zap_shader_init(struct msm_gpu * gpu)660 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
661 {
662 	static bool loaded;
663 	int ret;
664 
665 	/*
666 	 * If the zap shader is already loaded into memory we just need to kick
667 	 * the remote processor to reinitialize it
668 	 */
669 	if (loaded)
670 		return a5xx_zap_shader_resume(gpu);
671 
672 	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
673 
674 	loaded = !ret;
675 	return ret;
676 }
677 
/*
 * Interrupt sources unmasked at runtime: hardware error/timeout
 * conditions plus CP_SW, CACHE_FLUSH_TS (fence completion) and GPMU
 * voltage droop.
 */
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	  A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
	  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	  A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
	  A5XX_RBBM_INT_0_MASK_CP_SW | \
	  A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	  A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	  A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
690 
a5xx_hw_init(struct msm_gpu * gpu)691 static int a5xx_hw_init(struct msm_gpu *gpu)
692 {
693 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
694 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
695 	u32 regbit;
696 	int ret;
697 
698 	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
699 
700 	if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
701 	    adreno_is_a540(adreno_gpu))
702 		gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
703 
704 	/* Make all blocks contribute to the GPU BUSY perf counter */
705 	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
706 
707 	/* Enable RBBM error reporting bits */
708 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
709 
710 	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
711 		/*
712 		 * Mask out the activity signals from RB1-3 to avoid false
713 		 * positives
714 		 */
715 
716 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
717 			0xF0000000);
718 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
719 			0xFFFFFFFF);
720 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
721 			0xFFFFFFFF);
722 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
723 			0xFFFFFFFF);
724 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
725 			0xFFFFFFFF);
726 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
727 			0xFFFFFFFF);
728 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
729 			0xFFFFFFFF);
730 		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
731 			0xFFFFFFFF);
732 	}
733 
734 	/* Enable fault detection */
735 	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
736 		(1 << 30) | 0xFFFF);
737 
738 	/* Turn on performance counters */
739 	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
740 
741 	/* Select CP0 to always count cycles */
742 	gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
743 
744 	/* Select RBBM0 to countable 6 to get the busy status for devfreq */
745 	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
746 
747 	/* Increase VFD cache access so LRZ and other data gets evicted less */
748 	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
749 
750 	/* Disable L2 bypass in the UCHE */
751 	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
752 	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
753 	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
754 	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
755 
756 	/* Set the GMEM VA range (0 to gpu->gmem) */
757 	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
758 	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
759 	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
760 		0x00100000 + adreno_gpu->info->gmem - 1);
761 	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
762 
763 	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
764 	    adreno_is_a510(adreno_gpu)) {
765 		gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
766 		if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
767 			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
768 		else
769 			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
770 		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
771 		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
772 	} else {
773 		gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
774 		if (adreno_is_a530(adreno_gpu))
775 			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
776 		else
777 			gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
778 		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
779 		gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
780 	}
781 
782 	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
783 		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
784 			  (0x100 << 11 | 0x100 << 22));
785 	else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
786 		 adreno_is_a512(adreno_gpu))
787 		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
788 			  (0x200 << 11 | 0x200 << 22));
789 	else
790 		gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
791 			  (0x400 << 11 | 0x300 << 22));
792 
793 	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
794 		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
795 
796 	/*
797 	 * Disable the RB sampler datapath DP2 clock gating optimization
798 	 * for 1-SP GPUs, as it is enabled by default.
799 	 */
800 	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
801 	    adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
802 		gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
803 
804 	/* Disable UCHE global filter as SP can invalidate/flush independently */
805 	gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
806 
807 	/* Enable USE_RETENTION_FLOPS */
808 	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
809 
810 	/* Enable ME/PFP split notification */
811 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
812 
813 	/*
814 	 *  In A5x, CCU can send context_done event of a particular context to
815 	 *  UCHE which ultimately reaches CP even when there is valid
816 	 *  transaction of that context inside CCU. This can let CP to program
817 	 *  config registers, which will make the "valid transaction" inside
818 	 *  CCU to be interpreted differently. This can cause gpu fault. This
819 	 *  bug is fixed in latest A510 revision. To enable this bug fix -
820 	 *  bit[11] of RB_DBG_ECO_CNTL need to be set to 0, default is 1
821 	 *  (disable). For older A510 version this bit is unused.
822 	 */
823 	if (adreno_is_a510(adreno_gpu))
824 		gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
825 
826 	/* Enable HWCG */
827 	a5xx_set_hwcg(gpu, true);
828 
829 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
830 
831 	/* Set the highest bank bit */
832 	if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
833 		regbit = 2;
834 	else
835 		regbit = 1;
836 
837 	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
838 	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
839 
840 	if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
841 	    adreno_is_a540(adreno_gpu))
842 		gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
843 
844 	/* Disable All flat shading optimization (ALLFLATOPTDIS) */
845 	gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
846 
847 	/* Protect registers from the CP */
848 	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
849 
850 	/* RBBM */
851 	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
852 	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
853 	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
854 	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
855 	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
856 	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
857 
858 	/* Content protect */
859 	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
860 		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
861 			16));
862 	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
863 		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
864 
865 	/* CP */
866 	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
867 	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
868 	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
869 	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
870 
871 	/* RB */
872 	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
873 	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
874 
875 	/* VPC */
876 	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
877 	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
878 
879 	/* UCHE */
880 	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
881 
882 	/* SMMU */
883 	gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
884 			ADRENO_PROTECT_RW(0x10000, 0x8000));
885 
886 	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
887 	/*
888 	 * Disable the trusted memory range - we don't actually supported secure
889 	 * memory rendering at this point in time and we don't want to block off
890 	 * part of the virtual memory space.
891 	 */
892 	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
893 	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
894 
895 	/* Put the GPU into 64 bit by default */
896 	gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
897 	gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
898 	gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
899 	gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
900 	gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
901 	gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
902 	gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
903 	gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
904 	gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
905 	gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
906 	gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
907 	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
908 
909 	/*
910 	 * VPC corner case with local memory load kill leads to corrupt
911 	 * internal state. Normal Disable does not work for all a5x chips.
912 	 * So do the following setting to disable it.
913 	 */
914 	if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
915 		gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
916 		gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
917 	}
918 
919 	ret = adreno_hw_init(gpu);
920 	if (ret)
921 		return ret;
922 
923 	if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
924 		a5xx_gpmu_ucode_init(gpu);
925 
926 	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
927 	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
928 
929 	/* Set the ringbuffer address */
930 	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
931 
932 	/*
933 	 * If the microcode supports the WHERE_AM_I opcode then we can use that
934 	 * in lieu of the RPTR shadow and enable preemption. Otherwise, we
935 	 * can't safely use the RPTR shadow or preemption. In either case, the
936 	 * RPTR shadow should be disabled in hardware.
937 	 */
938 	gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
939 		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
940 
941 	/* Configure the RPTR shadow if needed: */
942 	if (a5xx_gpu->shadow_bo) {
943 		gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
944 			    shadowptr(a5xx_gpu, gpu->rb[0]));
945 	}
946 
947 	a5xx_preempt_hw_init(gpu);
948 
949 	/* Disable the interrupts through the initial bringup stage */
950 	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
951 
952 	/* Clear ME_HALT to start the micro engine */
953 	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
954 	ret = a5xx_me_init(gpu);
955 	if (ret)
956 		return ret;
957 
958 	ret = a5xx_power_init(gpu);
959 	if (ret)
960 		return ret;
961 
962 	/*
963 	 * Send a pipeline event stat to get misbehaving counters to start
964 	 * ticking correctly
965 	 */
966 	if (adreno_is_a530(adreno_gpu)) {
967 		OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
968 		OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
969 
970 		a5xx_flush(gpu, gpu->rb[0], true);
971 		if (!a5xx_idle(gpu, gpu->rb[0]))
972 			return -EINVAL;
973 	}
974 
975 	/*
976 	 * If the chip that we are using does support loading one, then
977 	 * try to load a zap shader into the secure world. If successful
978 	 * we can use the CP to switch out of secure mode. If not then we
979 	 * have no resource but to try to switch ourselves out manually. If we
980 	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
981 	 * be blocked and a permissions violation will soon follow.
982 	 */
983 	ret = a5xx_zap_shader_init(gpu);
984 	if (!ret) {
985 		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
986 		OUT_RING(gpu->rb[0], 0x00000000);
987 
988 		a5xx_flush(gpu, gpu->rb[0], true);
989 		if (!a5xx_idle(gpu, gpu->rb[0]))
990 			return -EINVAL;
991 	} else if (ret == -ENODEV) {
992 		/*
993 		 * This device does not use zap shader (but print a warning
994 		 * just in case someone got their dt wrong.. hopefully they
995 		 * have a debug UART to realize the error of their ways...
996 		 * if you mess this up you are about to crash horribly)
997 		 */
998 		dev_warn_once(gpu->dev->dev,
999 			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
1000 		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
1001 	} else {
1002 		return ret;
1003 	}
1004 
1005 	/* Last step - yield the ringbuffer */
1006 	a5xx_preempt_start(gpu);
1007 
1008 	return 0;
1009 }
1010 
a5xx_recover(struct msm_gpu * gpu)1011 static void a5xx_recover(struct msm_gpu *gpu)
1012 {
1013 	int i;
1014 
1015 	adreno_dump_info(gpu);
1016 
1017 	for (i = 0; i < 8; i++) {
1018 		printk("CP_SCRATCH_REG%d: %u\n", i,
1019 			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
1020 	}
1021 
1022 	if (hang_debug)
1023 		a5xx_dump(gpu);
1024 
1025 	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
1026 	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
1027 	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
1028 	adreno_recover(gpu);
1029 }
1030 
a5xx_destroy(struct msm_gpu * gpu)1031 static void a5xx_destroy(struct msm_gpu *gpu)
1032 {
1033 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1034 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1035 
1036 	DBG("%s", gpu->name);
1037 
1038 	a5xx_preempt_fini(gpu);
1039 
1040 	if (a5xx_gpu->pm4_bo) {
1041 		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
1042 		drm_gem_object_put(a5xx_gpu->pm4_bo);
1043 	}
1044 
1045 	if (a5xx_gpu->pfp_bo) {
1046 		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
1047 		drm_gem_object_put(a5xx_gpu->pfp_bo);
1048 	}
1049 
1050 	if (a5xx_gpu->gpmu_bo) {
1051 		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
1052 		drm_gem_object_put(a5xx_gpu->gpmu_bo);
1053 	}
1054 
1055 	if (a5xx_gpu->shadow_bo) {
1056 		msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
1057 		drm_gem_object_put(a5xx_gpu->shadow_bo);
1058 	}
1059 
1060 	adreno_gpu_cleanup(adreno_gpu);
1061 	kfree(a5xx_gpu);
1062 }
1063 
_a5xx_check_idle(struct msm_gpu * gpu)1064 static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
1065 {
1066 	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
1067 		return false;
1068 
1069 	/*
1070 	 * Nearly every abnormality ends up pausing the GPU and triggering a
1071 	 * fault so we can safely just watch for this one interrupt to fire
1072 	 */
1073 	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
1074 		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
1075 }
1076 
/*
 * Wait for the given ring to drain and for the GPU hardware to go idle.
 * Returns true on success; false if the ring is not the active one, the
 * CP fails to drain, or the idle poll times out.
 */
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	/* Only the currently-active ring can be meaningfully idled */
	if (ring != a5xx_gpu->cur_ring) {
		WARN(1, "Tried to idle a non-current ringbuffer\n");
		return false;
	}

	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	/* Then poll until the hardware units themselves report idle */
	if (spin_until(_a5xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
		return false;
	}

	return true;
}
1103 
a5xx_fault_handler(void * arg,unsigned long iova,int flags,void * data)1104 static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
1105 {
1106 	struct msm_gpu *gpu = arg;
1107 	struct adreno_smmu_fault_info *info = data;
1108 	char block[12] = "unknown";
1109 	u32 scratch[] = {
1110 			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
1111 			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
1112 			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
1113 			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)),
1114 	};
1115 
1116 	if (info)
1117 		snprintf(block, sizeof(block), "%x", info->fsynr1);
1118 
1119 	return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
1120 }
1121 
a5xx_cp_err_irq(struct msm_gpu * gpu)1122 static void a5xx_cp_err_irq(struct msm_gpu *gpu)
1123 {
1124 	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
1125 
1126 	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
1127 		u32 val;
1128 
1129 		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
1130 
1131 		/*
1132 		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
1133 		 * read it twice
1134 		 */
1135 
1136 		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
1137 		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
1138 
1139 		dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
1140 			val);
1141 	}
1142 
1143 	if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
1144 		dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
1145 			gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
1146 
1147 	if (status & A5XX_CP_INT_CP_DMA_ERROR)
1148 		dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
1149 
1150 	if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
1151 		u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
1152 
1153 		dev_err_ratelimited(gpu->dev->dev,
1154 			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
1155 			val & (1 << 24) ? "WRITE" : "READ",
1156 			(val & 0xFFFFF) >> 2, val);
1157 	}
1158 
1159 	if (status & A5XX_CP_INT_CP_AHB_ERROR) {
1160 		u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
1161 		const char *access[16] = { "reserved", "reserved",
1162 			"timestamp lo", "timestamp hi", "pfp read", "pfp write",
1163 			"", "", "me read", "me write", "", "", "crashdump read",
1164 			"crashdump write" };
1165 
1166 		dev_err_ratelimited(gpu->dev->dev,
1167 			"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
1168 			status & 0xFFFFF, access[(status >> 24) & 0xF],
1169 			(status & (1 << 31)), status);
1170 	}
1171 }
1172 
/*
 * Decode and log RBBM error interrupts. The caller passes in the
 * RBBM_INT_0_STATUS value it already read; all bits except the AHB error
 * were cleared by the top-level handler, so the AHB error source and its
 * interrupt bit are cleared here, in that order.
 */
static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
			val & (1 << 28) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
			(val >> 24) & 0xF);

		/* Clear the error */
		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

		/* Clear the interrupt */
		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
	}

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}
1213 
a5xx_uche_err_irq(struct msm_gpu * gpu)1214 static void a5xx_uche_err_irq(struct msm_gpu *gpu)
1215 {
1216 	uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
1217 
1218 	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
1219 
1220 	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
1221 		addr);
1222 }
1223 
/* GPMU interrupt handler: the GPMU signalled a voltage droop event */
static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}
1228 
/*
 * Hang-detect interrupt: log the GPU/CP state and schedule recovery,
 * unless the "hang" is really a stall on an SMMU fault.
 */
static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

	/*
	 * If stalled on SMMU fault, we could trip the GPU's hang detection,
	 * but the fault handler will trigger the devcore dump, and we want
	 * to otherwise resume normally rather than killing the submit, so
	 * just bail.
	 */
	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24))
		return;

	/* Dump the state needed to understand where the GPU was executing */
	DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	/* Defer the actual recovery to the worker thread */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}
1258 
/* RBBM error interrupt bits that are routed to a5xx_rbbm_err_irq() */
#define RBBM_ERROR_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
1266 
/*
 * Top-level a5xx interrupt handler: acknowledge the pending interrupts
 * and dispatch each status bit to its specific handler.
 */
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	/*
	 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
	 * before the source is cleared the interrupt will storm.
	 */
	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

	/* With error IRQs disabled, only service retire and preempt bits */
	if (priv->disable_err_irq) {
		status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
			  A5XX_RBBM_INT_0_MASK_CP_SW;
	}

	/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
	if (status & RBBM_ERROR_MASK)
		a5xx_rbbm_err_irq(gpu, status);

	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a5xx_cp_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
		a5xx_fault_detect_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		a5xx_uche_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
		a5xx_gpmu_err_irq(gpu);

	/* Cache flush timestamp means a submit retired; may also preempt */
	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
		a5xx_preempt_trigger(gpu);
		msm_gpu_retire(gpu);
	}

	if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
		a5xx_preempt_irq(gpu);

	return IRQ_HANDLED;
}
1310 
/*
 * Register ranges (inclusive start/end pairs) dumped for debug and GPU
 * state snapshots; the list is terminated by ~0.
 */
static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
	0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
	0XA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
	0xAC60, 0xAC60, ~0,
};
1341 
/* Print the RBBM status then dump the generic adreno register state */
static void a5xx_dump(struct msm_gpu *gpu)
{
	DRM_DEV_INFO(gpu->dev->dev, "status:   %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	adreno_dump(gpu);
}
1348 
/*
 * Power up the GPU. For the smaller parts (a506/508/509/510/512) this is
 * just core power plus clock gating setup; a530/a540 additionally need
 * the RBCCU and SP GDSC power domains brought up in sequence.
 */
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	/* Adreno 506, 508, 509, 510, 512 needs manual RBBM sus/res control */
	if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
		/* Halt the sp_input_clk at HM level */
		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
		a5xx_set_hwcg(gpu, true);
		/* Turn on sp_input_clk at HM level */
		gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
		return 0;
	}

	/* Turn the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */
	udelay(3);

	/* Bit 20 of the power/clock status indicates the GDSC is enabled */
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret) {
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu->name,
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
		return ret;
	}

	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret)
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
			gpu->name);

	return ret;
}
1394 
/*
 * Power down the GPU: halt and drain the VBIF, reset it on the parts
 * that need it, cut power, and invalidate the cached rptr shadows.
 */
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	u32 mask = 0xf;
	int i, ret;

	/* A506, A508, A510 have 3 XIN ports in VBIF */
	if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
	    adreno_is_a510(adreno_gpu))
		mask = 0x7;

	/* Clear the VBIF pipe before shutting down */
	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
				mask) == mask);

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

	/*
	 * Reset the VBIF before power collapse to avoid issue with FIFO
	 * entries on Adreno A510 and A530 (the others will tend to lock up)
	 */
	if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
		gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
		gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
	}

	ret = msm_gpu_pm_suspend(gpu);
	if (ret)
		return ret;

	/* The shadow values are stale after power collapse - zero them out */
	if (a5xx_gpu->has_whereami)
		for (i = 0; i < gpu->nr_rings; i++)
			a5xx_gpu->shadow[i] = 0;

	return 0;
}
1433 
/* Read the 64-bit always-on counter as the GPU timestamp */
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);

	return 0;
}
1440 
/* State for a single run of the CP crashdumper */
struct a5xx_crashdumper {
	void *ptr;			/* kernel mapping of the dump buffer */
	struct drm_gem_object *bo;	/* backing GEM object */
	u64 iova;			/* GPU address of the buffer */
};

/* a5xx GPU state: the generic adreno state plus the HLSQ aperture regs */
struct a5xx_gpu_state {
	struct msm_gpu_state base;
	u32 *hlsqregs;			/* kcalloc'd copy of the HLSQ registers */
};
1451 
a5xx_crashdumper_init(struct msm_gpu * gpu,struct a5xx_crashdumper * dumper)1452 static int a5xx_crashdumper_init(struct msm_gpu *gpu,
1453 		struct a5xx_crashdumper *dumper)
1454 {
1455 	dumper->ptr = msm_gem_kernel_new(gpu->dev,
1456 		SZ_1M, MSM_BO_WC, gpu->aspace,
1457 		&dumper->bo, &dumper->iova);
1458 
1459 	if (!IS_ERR(dumper->ptr))
1460 		msm_gem_object_set_name(dumper->bo, "crashdump");
1461 
1462 	return PTR_ERR_OR_ZERO(dumper->ptr);
1463 }
1464 
/*
 * Point the CP at the crashdump script, kick it off, and poll for the
 * completion bit. Returns 0 on success or a negative error on timeout
 * or if the dump buffer was never allocated.
 */
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
		struct a5xx_crashdumper *dumper)
{
	u32 val;

	if (IS_ERR_OR_NULL(dumper->ptr))
		return -EINVAL;

	/* The script was written at offset 0 of the dump buffer */
	gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);

	gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);

	/* Bit 2 of CRASH_DUMP_CNTL signals completion */
	return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
		val & 0x04, 100, 10000);
}
1480 
/*
 * These are a list of the registers that need to be read through the HLSQ
 * aperture through the crashdumper.  These are not nominally accessible from
 * the CPU on a secure platform.
 */
static const struct {
	u32 type;	/* aperture/bank select value written to DBG_READ_SEL */
	u32 regoffset;	/* first register dword offset in the bank */
	u32 count;	/* number of registers to read */
} a5xx_hlsq_aperture_regs[] = {
	{ 0x35, 0xe00, 0x32 },   /* HLSQ non-context */
	{ 0x31, 0x2080, 0x1 },   /* HLSQ 2D context 0 */
	{ 0x33, 0x2480, 0x1 },   /* HLSQ 2D context 1 */
	{ 0x32, 0xe780, 0x62 },  /* HLSQ 3D context 0 */
	{ 0x34, 0xef80, 0x62 },  /* HLSQ 3D context 1 */
	{ 0x3f, 0x0ec0, 0x40 },  /* SP non-context */
	{ 0x3d, 0x2040, 0x1 },   /* SP 2D context 0 */
	{ 0x3b, 0x2440, 0x1 },   /* SP 2D context 1 */
	{ 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
	{ 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
	{ 0x3a, 0x0f00, 0x1c },  /* TP non-context */
	{ 0x38, 0x2000, 0xa },   /* TP 2D context 0 */
	{ 0x36, 0x2400, 0xa },   /* TP 2D context 1 */
	{ 0x39, 0xe700, 0x80 },  /* TP 3D context 0 */
	{ 0x37, 0xef00, 0x80 },  /* TP 3D context 1 */
};
1507 
/*
 * Use the CP crashdumper to read the HLSQ-aperture registers that the CPU
 * cannot access directly, and copy them into the a5xx GPU state. On any
 * failure the hlsqregs pointer is simply left unset/freed and the state
 * is returned without them.
 */
static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
		struct a5xx_gpu_state *a5xx_state)
{
	struct a5xx_crashdumper dumper = { 0 };
	u32 offset, count = 0;
	u64 *ptr;
	int i;

	if (a5xx_crashdumper_init(gpu, &dumper))
		return;

	/* The script will be written at offset 0 */
	ptr = dumper.ptr;

	/* Start writing the data at offset 256k */
	/*
	 * NOTE(review): offset is u32 while dumper.iova is u64 - this assumes
	 * the dump buffer is mapped below 4G; confirm against the aspace setup
	 */
	offset = dumper.iova + (256 * SZ_1K);

	/* Count how many additional registers to get from the HLSQ aperture */
	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
		count += a5xx_hlsq_aperture_regs[i].count;

	a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
	if (!a5xx_state->hlsqregs)
		return;

	/* Build the crashdump script */
	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
		u32 type = a5xx_hlsq_aperture_regs[i].type;
		u32 c = a5xx_hlsq_aperture_regs[i].count;

		/* Write the register to select the desired bank */
		*ptr++ = ((u64) type << 8);
		*ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
			(1 << 21) | 1;

		/* Then read 'c' dwords from the aperture into the buffer */
		*ptr++ = offset;
		*ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
			| c;

		offset += c * sizeof(u32);
	}

	/* Write two zeros to close off the script */
	*ptr++ = 0;
	*ptr++ = 0;

	if (a5xx_crashdumper_run(gpu, &dumper)) {
		kfree(a5xx_state->hlsqregs);
		msm_gem_kernel_put(dumper.bo, gpu->aspace);
		return;
	}

	/* Copy the data from the crashdumper to the state */
	memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
		count * sizeof(u32));

	msm_gem_kernel_put(dumper.bo, gpu->aspace);
}
1566 
/*
 * Capture a snapshot of the GPU state for devcoredump: the generic adreno
 * state plus, when the GPU is not stalled on an SMMU fault, the HLSQ
 * aperture registers. Returns the state or ERR_PTR(-ENOMEM).
 */
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
{
	struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
			GFP_KERNEL);
	bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24));

	if (!a5xx_state)
		return ERR_PTR(-ENOMEM);

	/* Temporarily disable hardware clock gating before reading the hw */
	a5xx_set_hwcg(gpu, false);

	/* First get the generic state from the adreno core */
	adreno_gpu_state_get(gpu, &(a5xx_state->base));

	a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);

	/*
	 * Get the HLSQ regs with the help of the crashdumper, but only if
	 * we are not stalled in an iommu fault (in which case the crashdumper
	 * would not have access to memory)
	 */
	if (!stalled)
		a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);

	/* Re-enable clock gating now that the registers have been read */
	a5xx_set_hwcg(gpu, true);

	return &a5xx_state->base;
}
1596 
/* kref release callback: free the HLSQ copy, the base state, and the wrapper */
static void a5xx_gpu_state_destroy(struct kref *kref)
{
	struct msm_gpu_state *state = container_of(kref,
		struct msm_gpu_state, ref);
	struct a5xx_gpu_state *a5xx_state = container_of(state,
		struct a5xx_gpu_state, base);

	kfree(a5xx_state->hlsqregs);

	adreno_gpu_state_destroy(state);
	kfree(a5xx_state);
}
1609 
/* Drop a reference to the GPU state; destroys it on the last put */
static int a5xx_gpu_state_put(struct msm_gpu_state *state)
{
	if (IS_ERR_OR_NULL(state))
		return 1;

	return kref_put(&state->ref, a5xx_gpu_state_destroy);
}
1617 
1618 
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
/*
 * Print a captured GPU state: the generic adreno dump followed by the
 * HLSQ-aperture registers gathered by the crashdumper (if any).
 */
static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		      struct drm_printer *p)
{
	int i, j;
	u32 pos = 0;	/* running index into the flat hlsqregs array */
	struct a5xx_gpu_state *a5xx_state = container_of(state,
		struct a5xx_gpu_state, base);

	if (IS_ERR_OR_NULL(state))
		return;

	adreno_show(gpu, state, p);

	/* Dump the additional a5xx HLSQ registers */
	if (!a5xx_state->hlsqregs)
		return;

	drm_printf(p, "registers-hlsq:\n");

	for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
		u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
		u32 c = a5xx_hlsq_aperture_regs[i].count;

		for (j = 0; j < c; j++, pos++, o++) {
			/*
			 * To keep the crashdump simple we pull the entire range
			 * for each register type but not all of the registers
			 * in the range are valid. Fortunately invalid registers
			 * stick out like a sore thumb with a value of
			 * 0xdeadbeef
			 */
			if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
				continue;

			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
				o << 2, a5xx_state->hlsqregs[pos]);
		}
	}
}
#endif
1660 
a5xx_active_ring(struct msm_gpu * gpu)1661 static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
1662 {
1663 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1664 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1665 
1666 	return a5xx_gpu->cur_ring;
1667 }
1668 
a5xx_gpu_busy(struct msm_gpu * gpu,unsigned long * out_sample_rate)1669 static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
1670 {
1671 	u64 busy_cycles;
1672 
1673 	busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
1674 	*out_sample_rate = clk_get_rate(gpu->core_clk);
1675 
1676 	return busy_cycles;
1677 }
1678 
a5xx_get_rptr(struct msm_gpu * gpu,struct msm_ringbuffer * ring)1679 static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
1680 {
1681 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1682 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1683 
1684 	if (a5xx_gpu->has_whereami)
1685 		return a5xx_gpu->shadow[ring->id];
1686 
1687 	return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
1688 }
1689 
/* Function table wiring the a5xx implementations into the msm GPU core */
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.set_param = adreno_set_param,
		.hw_init = a5xx_hw_init,
		.ucode_load = a5xx_ucode_load,
		.pm_suspend = a5xx_pm_suspend,
		.pm_resume = a5xx_pm_resume,
		.recover = a5xx_recover,
		.submit = a5xx_submit,
		.active_ring = a5xx_active_ring,
		.irq = a5xx_irq,
		.destroy = a5xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
		.show = a5xx_show,
#endif
#if defined(CONFIG_DEBUG_FS)
		.debugfs_init = a5xx_debugfs_init,
#endif
		.gpu_busy = a5xx_gpu_busy,
		.gpu_state_get = a5xx_gpu_state_get,
		.gpu_state_put = a5xx_gpu_state_put,
		.create_address_space = adreno_create_address_space,
		.get_rptr = a5xx_get_rptr,
	},
	.get_timestamp = a5xx_get_timestamp,
};
1717 
check_speed_bin(struct device * dev)1718 static void check_speed_bin(struct device *dev)
1719 {
1720 	struct nvmem_cell *cell;
1721 	u32 val;
1722 
1723 	/*
1724 	 * If the OPP table specifies a opp-supported-hw property then we have
1725 	 * to set something with dev_pm_opp_set_supported_hw() or the table
1726 	 * doesn't get populated so pick an arbitrary value that should
1727 	 * ensure the default frequencies are selected but not conflict with any
1728 	 * actual bins
1729 	 */
1730 	val = 0x80;
1731 
1732 	cell = nvmem_cell_get(dev, "speed_bin");
1733 
1734 	if (!IS_ERR(cell)) {
1735 		void *buf = nvmem_cell_read(cell, NULL);
1736 
1737 		if (!IS_ERR(buf)) {
1738 			u8 bin = *((u8 *) buf);
1739 
1740 			val = (1 << bin);
1741 			kfree(buf);
1742 		}
1743 
1744 		nvmem_cell_put(cell);
1745 	}
1746 
1747 	devm_pm_opp_set_supported_hw(dev, &val, 1);
1748 }
1749 
a5xx_gpu_init(struct drm_device * dev)1750 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
1751 {
1752 	struct msm_drm_private *priv = dev->dev_private;
1753 	struct platform_device *pdev = priv->gpu_pdev;
1754 	struct adreno_platform_config *config = pdev->dev.platform_data;
1755 	struct a5xx_gpu *a5xx_gpu = NULL;
1756 	struct adreno_gpu *adreno_gpu;
1757 	struct msm_gpu *gpu;
1758 	unsigned int nr_rings;
1759 	int ret;
1760 
1761 	if (!pdev) {
1762 		DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
1763 		return ERR_PTR(-ENXIO);
1764 	}
1765 
1766 	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
1767 	if (!a5xx_gpu)
1768 		return ERR_PTR(-ENOMEM);
1769 
1770 	adreno_gpu = &a5xx_gpu->base;
1771 	gpu = &adreno_gpu->base;
1772 
1773 	adreno_gpu->registers = a5xx_registers;
1774 
1775 	a5xx_gpu->lm_leakage = 0x4E001A;
1776 
1777 	check_speed_bin(&pdev->dev);
1778 
1779 	nr_rings = 4;
1780 
1781 	if (config->info->revn == 510)
1782 		nr_rings = 1;
1783 
1784 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
1785 	if (ret) {
1786 		a5xx_destroy(&(a5xx_gpu->base.base));
1787 		return ERR_PTR(ret);
1788 	}
1789 
1790 	if (gpu->aspace)
1791 		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
1792 
1793 	/* Set up the preemption specific bits and pieces for each ringbuffer */
1794 	a5xx_preempt_init(gpu);
1795 
1796 	return gpu;
1797 }
1798